Merge "Add response schema validation for volume pools"
diff --git a/.gitignore b/.gitignore
index 06a2281..8b6222e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,9 @@
+# Don't add patterns to exclude files created by preferred personal tools
+# (editors, IDEs, even your operating system itself). These should instead be
+# maintained outside the repository, for example in a ~/.gitignore file added
+# with:
+#
+# git config --global core.excludesfile '~/.gitignore'
AUTHORS
ChangeLog
*.pyc
@@ -25,7 +31,7 @@
!.coveragerc
cover/
doc/source/_static/tempest.conf.sample
-doc/source/plugin-registry.rst
+doc/source/plugins/plugin-registry.rst
# Files created by releasenotes build
releasenotes/build
diff --git a/.gitreview b/.gitreview
index 84b5114..a475594 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
project=openstack/tempest.git
diff --git a/.zuul.yaml b/.zuul.yaml
index 5a649e4..87e277c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,19 +1,18 @@
- job:
name: devstack-tempest
parent: devstack
- nodeset: openstack-single-node
description: |
Base Tempest job.
This Tempest job provides the base for both the single and multi-node
test setup. To run a multi-node test inherit from devstack-tempest and
set the nodeset to a multi-node one.
- required-projects:
- - git.openstack.org/openstack/tempest
+ required-projects: &base_required-projects
+ - opendev.org/openstack/tempest
timeout: 7200
- roles:
- - zuul: git.openstack.org/openstack-dev/devstack
- vars:
+ roles: &base_roles
+ - zuul: opendev.org/openstack/devstack
+ vars: &base_vars
devstack_services:
tempest: true
devstack_local_conf:
@@ -38,6 +37,54 @@
post-run: playbooks/post-tempest.yaml
- job:
+ name: tempest-all
+ parent: devstack-tempest
+ description: |
+ Integration test that runs all tests.
+ Former name for this job was:
+ * legacy-periodic-tempest-dsvm-all-master
+ vars:
+ tox_envlist: all
+ tempest_test_regex: tempest
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+
+- job:
+ name: devstack-tempest-ipv6
+ parent: devstack-ipv6
+ description: |
+    Base Tempest IPv6 job. This job is derived from 'devstack-ipv6',
+    which sets the IPv6-only setting for OpenStack services. As part of
+    the run phase, this job will verify the IPv6 setting and check that the
+    service endpoints and listen addresses are IPv6. Basically it will run
+    the script ./tool/verify-ipv6-only-deployments.sh
+
+    Child jobs of this job can run their own set of tests and can
+    add post-run playbooks to extend the IPv6 verification specific
+    to their deployed services.
+    Check the wiki page for more details about project job setup:
+    - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+ required-projects: *base_required-projects
+ timeout: 7200
+ roles: *base_roles
+ vars: *base_vars
+ run: playbooks/devstack-tempest-ipv6.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: tempest-ipv6-only
+ parent: devstack-tempest-ipv6
+ # This currently works from stable/pike on.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Integration test of IPv6-only deployments. This job runs
+    smoke and IPv6 related tests only. The basic idea is to test
+    whether OpenStack services listen on IPv6 addresses or not.
+ timeout: 10800
+ vars:
+ tox_envlist: ipv6-only
+
+- job:
name: tempest-full
parent: devstack-tempest
# This currently works from stable/pike on.
@@ -53,6 +100,49 @@
tox_envlist: full
devstack_localrc:
ENABLE_FILE_INJECTION: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ USE_PYTHON3: False
+ devstack_services:
+ # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+ # since tempest-full is in the integrated-gate project template but
+ # the backup tests do not really involve other services so they should
+ # be run in some more cinder-specific job, especially because the
+ # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+ c-bak: false
+
+- job:
+ name: tempest-full-oslo-master
+ parent: tempest-full
+ description: |
+ Integration test using current git of oslo libs.
+ This ensures that when oslo libs get released that they
+ do not break OpenStack server projects.
+
+ Former name for this job was
+ periodic-tempest-dsvm-oslo-latest-full-master.
+ timeout: 10800
+ required-projects:
+ - opendev.org/openstack/oslo.cache
+ - opendev.org/openstack/oslo.concurrency
+ - opendev.org/openstack/oslo.config
+ - opendev.org/openstack/oslo.context
+ - opendev.org/openstack/oslo.db
+ - opendev.org/openstack/oslo.i18n
+ - opendev.org/openstack/oslo.log
+ - opendev.org/openstack/oslo.messaging
+ - opendev.org/openstack/oslo.middleware
+ - opendev.org/openstack/oslo.policy
+ - opendev.org/openstack/oslo.privsep
+ - opendev.org/openstack/oslo.reports
+ - opendev.org/openstack/oslo.rootwrap
+ - opendev.org/openstack/oslo.serialization
+ - opendev.org/openstack/oslo.service
+ - opendev.org/openstack/oslo.utils
+ - opendev.org/openstack/oslo.versionedobjects
+ - opendev.org/openstack/oslo.vmware
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-parallel
@@ -61,11 +151,16 @@
branches:
- master
description: |
- Base integration test with Neutron networking and py27.
+ Base integration test with Neutron networking.
It includes all scenarios as it was in the past.
This job runs all scenario tests in parallel!
+ timeout: 9000
vars:
tox_envlist: full-parallel
+ run_tempest_cleanup: true
+ run_tempest_dry_cleanup: true
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-py3
@@ -84,6 +179,128 @@
devstack_localrc:
USE_PYTHON3: true
FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
+ c-bak: false
+
+- job:
+ name: tempest-integrated-networking
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+    This job runs integration tests for networking. This is a subset of
+    the 'tempest-full' job and runs only Neutron and Nova related tests.
+    This is meant to be run on the Neutron gate only.
+ vars:
+ tox_envlist: integrated-network
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-compute
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+    This job runs integration tests for compute. This is a
+    subset of the 'tempest-full' job and runs Nova, Neutron, Cinder (except
+    backup tests) and Glance related tests. This is meant to be run on the
+    Nova gate only.
+ vars:
+ tox_envlist: integrated-compute
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-placement
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+    This job runs integration tests for placement. This is a
+    subset of the 'tempest-full' job and runs Nova and Neutron
+    related tests. This is meant to be run on the Placement gate only.
+ vars:
+ tox_envlist: integrated-placement
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+    This job runs integration tests for image & block storage. This is a
+    subset of the 'tempest-full' job and runs Cinder, Glance, Swift and Nova
+    related tests. This is meant to be run on the Cinder and Glance gates only.
+ vars:
+ tox_envlist: integrated-storage
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+ name: tempest-integrated-object-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+    This job runs integration tests for object storage. This is a
+    subset of the 'tempest-full' job and runs Swift, Cinder and Glance
+    related tests. This is meant to be run on the Swift gate only.
+ vars:
+ tox_envlist: integrated-object-storage
+ devstack_localrc:
+      # NOTE(gmann): swift is not ready on python3 yet and devstack
+      # installs it on python2.7 only. But setting USE_PYTHON3 here
+      # for the future once swift is ready on py3.
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-full-py3-ipv6
+ parent: devstack-tempest-ipv6
+ # This currently works from stable/pike on.
+ # Before stable/pike, legacy version of tempest-full
+ # 'legacy-tempest-dsvm-neutron-full' run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking, IPv6 and py3.
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
devstack_services:
s-account: false
s-container: false
@@ -93,12 +310,8 @@
c-bak: false
- job:
- name: tempest-multinode-full
+ name: tempest-multinode-full-base
parent: devstack-tempest
- nodeset: openstack-two-node
- # Until the devstack changes are backported, only run this on master
- branches:
- - master
description: |
Base multinode integration test with Neutron networking and py27.
Former names for this job were:
@@ -123,35 +336,67 @@
LIVE_MIGRATION_AVAILABLE: true
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-- nodeset:
- name: openstack-bionic-node
- nodes:
- - name: controller
- label: ubuntu-bionic
- groups:
- - name: tempest
- nodes:
- - controller
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic from stable/stein on.
+ branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
- job:
- name: tempest-full-py36
- parent: tempest-full-py3
- nodeset: openstack-bionic-node
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+  # This job runs on Xenial and is for stable/pike, stable/queens
+  # and stable/rocky. It makes sure all stable branches
+  # before stable/stein keep running on Xenial. This job can be
+  # removed once stable/rocky is EOL.
branches:
- - master
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full-py3
+ parent: tempest-multinode-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-full-py3-opensuse15
+ parent: tempest-full-py3
+ nodeset: devstack-single-node-opensuse-15
description: |
- Base integration test with Neutron networking and py36.
+ Base integration test with Neutron networking and py36 running
+ on openSUSE Leap 15.x
voting: false
- job:
name: tempest-slow
parent: tempest-multinode-full
- branches:
- - master
description: |
This multinode integration job will run all the tests tagged as slow.
It enables the lvm multibackend setup to cover few scenario tests.
- This job will run only slow tests(API or Scenario) serially.
+ This job will run only slow tests (API or Scenario) serially.
Former names for this job were:
* legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
@@ -161,115 +406,73 @@
tox_envlist: slow-serial
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
- job:
- name: tempest-full-rocky
- parent: tempest-full
- override-checkout: stable/rocky
+ name: tempest-slow-py3
+ parent: tempest-slow
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
- job:
- name: tempest-full-rocky-py3
+ name: tempest-full-ussuri-py3
parent: tempest-full-py3
- override-checkout: stable/rocky
+ override-checkout: stable/ussuri
- job:
- name: tempest-full-queens
- parent: tempest-full
- override-checkout: stable/queens
-
-- job:
- name: tempest-full-queens-py3
+ name: tempest-full-train-py3
parent: tempest-full-py3
- override-checkout: stable/queens
+ override-checkout: stable/train
- job:
- name: tempest-full-pike
- parent: tempest-full
- override-checkout: stable/pike
+ name: tempest-full-stein-py3
+ parent: tempest-full-py3
+ override-checkout: stable/stein
- job:
name: tempest-tox-plugin-sanity-check
parent: tox
description: |
Run tempest plugin sanity check script using tox.
- nodeset: ubuntu-xenial
+ nodeset: ubuntu-bionic
vars:
tox_envlist: plugin-sanity-check
- voting: false
timeout: 5000
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- required-projects:
- - git.openstack.org/openstack/almanach
- - git.openstack.org/openstack/aodh
- - git.openstack.org/openstack/barbican-tempest-plugin
- - git.openstack.org/openstack/ceilometer
- - git.openstack.org/openstack/cinder
- - git.openstack.org/openstack/congress
- - git.openstack.org/openstack/designate-tempest-plugin
- - git.openstack.org/openstack/ec2-api
- - git.openstack.org/openstack/freezer
- - git.openstack.org/openstack/freezer-api
- - git.openstack.org/openstack/freezer-tempest-plugin
- - git.openstack.org/openstack/gce-api
- - git.openstack.org/openstack/glare
- - git.openstack.org/openstack/heat
- - git.openstack.org/openstack/intel-nfv-ci-tests
- - git.openstack.org/openstack/ironic
- - git.openstack.org/openstack/ironic-inspector
- - git.openstack.org/openstack/keystone-tempest-plugin
- - git.openstack.org/openstack/kingbird
- - git.openstack.org/openstack/kuryr-tempest-plugin
- - git.openstack.org/openstack/magnum
- - git.openstack.org/openstack/magnum-tempest-plugin
- - git.openstack.org/openstack/manila
- - git.openstack.org/openstack/manila-tempest-plugin
- - git.openstack.org/openstack/mistral
- - git.openstack.org/openstack/mogan
- - git.openstack.org/openstack/monasca-api
- - git.openstack.org/openstack/monasca-log-api
- - git.openstack.org/openstack/murano
- - git.openstack.org/openstack/networking-bgpvpn
- - git.openstack.org/openstack/networking-cisco
- - git.openstack.org/openstack/networking-fortinet
- - git.openstack.org/openstack/networking-generic-switch
- - git.openstack.org/openstack/networking-l2gw
- - git.openstack.org/openstack/networking-midonet
- - git.openstack.org/openstack/networking-plumgrid
- - git.openstack.org/openstack/networking-sfc
- - git.openstack.org/openstack/neutron
- - git.openstack.org/openstack/neutron-dynamic-routing
- - git.openstack.org/openstack/neutron-fwaas
- - git.openstack.org/openstack/neutron-lbaas
- - git.openstack.org/openstack/neutron-tempest-plugin
- - git.openstack.org/openstack/neutron-vpnaas
- - git.openstack.org/openstack/nova-lxd
- - git.openstack.org/openstack/novajoin-tempest-plugin
- - git.openstack.org/openstack/octavia-tempest-plugin
- - git.openstack.org/openstack/oswin-tempest-plugin
- - git.openstack.org/openstack/panko
- - git.openstack.org/openstack/patrole
- - git.openstack.org/openstack/qinling
- - git.openstack.org/openstack/requirements
- - git.openstack.org/openstack/sahara-tests
- - git.openstack.org/openstack/senlin
- - git.openstack.org/openstack/senlin-tempest-plugin
- - git.openstack.org/openstack/tap-as-a-service
- - git.openstack.org/openstack/tempest-horizon
- - git.openstack.org/openstack/trio2o
- - git.openstack.org/openstack/trove
- - git.openstack.org/openstack/valet
- - git.openstack.org/openstack/vitrage
- - git.openstack.org/openstack/vmware-nsx-tempest-plugin
- - git.openstack.org/openstack/watcher-tempest-plugin
- - git.openstack.org/openstack/zaqar-tempest-plugin
- - git.openstack.org/openstack/zun-tempest-plugin
- job:
name: tempest-cinder-v2-api
@@ -284,15 +487,130 @@
devstack_localrc:
TEMPEST_VOLUME_TYPE: volumev2
+- job:
+ name: tempest-full-test-account-py3
+ parent: tempest-full-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials instead of dynamic credentials and py3.
+ Former names for this job were:
+ - legacy-tempest-dsvm-full-test-accounts
+ - legacy-tempest-dsvm-neutron-full-test-accounts
+ - legacy-tempest-dsvm-identity-v3-test-accounts
+ vars:
+ devstack_localrc:
+ TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+ name: tempest-full-test-account-no-admin-py3
+ parent: tempest-full-test-account-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials and py3 without having an admin account.
+ Former name for this job was:
+ - legacy-tempest-dsvm-neutron-full-non-admin
+
+ vars:
+ devstack_localrc:
+ TEMPEST_HAS_ADMIN: False
+
+- job:
+ name: tempest-pg-full
+ parent: tempest-full
+ description: |
+ Base integration test with Neutron networking and PostgreSQL.
+ Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+ vars:
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+ DATABASE_TYPE: postgresql
+ USE_PYTHON3: True
+
+- project-template:
+ name: integrated-gate-networking
+ description: |
+ Run the python3 Tempest network integration tests (Nova and Neutron related)
+ in check and gate for the neutron integrated gate. This is meant to be
+    run on the Neutron gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+
+- project-template:
+ name: integrated-gate-compute
+ description: |
+ Run the python3 Tempest compute integration tests
+ (Nova, Neutron, Cinder and Glance related) in check and gate
+ for the Nova integrated gate. This is meant to be
+    run on the Nova gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+
+- project-template:
+ name: integrated-gate-placement
+ description: |
+ Run the python3 Tempest placement integration tests
+ (Nova and Neutron related) in check and gate
+ for the Placement integrated gate. This is meant to be
+    run on the Placement gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+
+- project-template:
+ name: integrated-gate-storage
+ description: |
+ Run the python3 Tempest image & block storage integration tests
+ (Cinder, Glance, Swift and Nova related) in check and gate
+ for the neutron integrated gate. This is meant to be
+    run on the Cinder and Glance gates only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+
+- project-template:
+ name: integrated-gate-object-storage
+ description: |
+ Run the python3 Tempest object storage integration tests
+ (Swift, Cinder and Glance related) in check and gate
+ for the swift integrated gate. This is meant to be
+    run on the Swift gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
+
- project:
templates:
- check-requirements
- - integrated-gate
- - integrated-gate-py35
+ - integrated-gate-py3
- openstack-cover-jobs
- - openstack-python-jobs
- - openstack-python35-jobs
- - openstack-python36-jobs
+ - openstack-python3-victoria-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
@@ -302,19 +620,15 @@
- ^playbooks/
- ^roles/
- ^.zuul.yaml$
- - nova-multiattach:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ - devstack-tempest-ipv6:
+ voting: false
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
- tempest-full-parallel:
- irrelevant-files:
- - ^(test-|)requirements.txt$
+ # Define list of irrelevant files to use everywhere else
+ irrelevant-files: &tempest-irrelevant-files
- ^.*\.rst$
- ^doc/.*$
- ^etc/.*$
@@ -322,9 +636,26 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- - tempest-full-py36:
- irrelevant-files:
- - ^(test-|)requirements.txt$
+ - ^tools/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-ipv6:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-ussuri-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-train-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-stein-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-multinode-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-tox-plugin-sanity-check:
+ irrelevant-files: &tempest-irrelevant-files-2
- ^.*\.rst$
- ^doc/.*$
- ^etc/.*$
@@ -332,460 +663,85 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- - tempest-full-rocky:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full-rocky-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full-queens:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full-queens-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full-pike:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-tox-plugin-sanity-check
- - tempest-slow:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - nova-cells-v1:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ # tools/ is not here since this relies on a script in tools/.
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
voting: false
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - neutron-grenade-multinode:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - neutron-grenade:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - devstack-plugin-ceph-tempest:
+ irrelevant-files: *tempest-irrelevant-files
+ - devstack-plugin-ceph-tempest-py3:
voting: false
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-grenade-multinode:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario001-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario002-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario003-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario004-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-ocata:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - interop-tempest-consistency:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-no-admin-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - openstack-tox-bashate:
+ irrelevant-files: *tempest-irrelevant-files-2
gate:
jobs:
- - nova-multiattach:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-slow:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - neutron-grenade:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
experimental:
jobs:
- tempest-cinder-v2-api:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-all-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-all:
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-neutron-dvr-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr-ha-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-full-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-identity-v3-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-non-admin:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-nova-v20-api:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-tempest-v2-api:
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-lvm-multibackend:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-cinder-v1:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-pg-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-opensuse-423:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-pg-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-opensuse15:
+ irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
- - tempest-full-rocky
- - tempest-full-rocky-py3
- - tempest-full-queens
- - tempest-full-queens-py3
- - tempest-full-pike
- - legacy-periodic-tempest-dsvm-neutron-full-ocata:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ - tempest-full-ussuri-py3
+ - tempest-full-train-py3
+ - tempest-full-stein-py3
periodic:
jobs:
- - legacy-periodic-tempest-dsvm-full-test-accounts-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-neutron-full-test-accounts-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-neutron-full-non-admin-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-all-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
-
+ - tempest-all
+ - tempest-full-oslo-master
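
The ``integrated-gate-*`` project templates added above are meant to be
consumed from a service project's own Zuul configuration. A minimal sketch
only (the consuming repository and its job customizations are assumptions,
not part of this change) of how such a project's ``.zuul.yaml`` could
reference one of the templates::

    # Hypothetical .zuul.yaml snippet in a consuming service repository.
    # Listing the template pulls in the grenade and tempest-integrated-storage
    # jobs in both check and gate for that repository.
    - project:
        templates:
          - integrated-gate-storage
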
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..2300763
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,19 @@
+The source repository for this project can be found at:
+
+ https://opendev.org/openstack/tempest
+
+Pull requests submitted through GitHub are not monitored.
+
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
+
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
+
+Bugs should be filed on Launchpad:
+
+ https://bugs.launchpad.net/tempest
+
+For more specific information about contributing to this repository, see the
+Tempest contributor guide:
+
+ https://docs.openstack.org/tempest/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index e767b25..95bcbb5 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -6,13 +6,12 @@
- Step 2: Read on
Tempest Specific Commandments
-------------------------------
+-----------------------------
- [T102] Cannot import OpenStack python clients in tempest/api &
tempest/scenario tests
- [T104] Scenario tests require a services decorator
- [T105] Tests cannot use setUpClass/tearDownClass
-- [T106] vim configuration should not be kept in source files.
- [T107] Check that a service tag isn't in the module path
- [T108] Check no hyphen at the end of rand_name() argument
- [T109] Cannot use testtools.skip decorator; instead use
@@ -28,6 +27,8 @@
- [T117] Check negative tests have ``@decorators.attr(type=['negative'])``
applied.
+It is recommended to use ``tox -eautopep8`` before submitting a patch.
+
Test Data/Configuration
-----------------------
- Assume nothing about existing test data
@@ -35,6 +36,30 @@
- Clean up test data at the completion of each test
- Use configuration files for values that will vary by environment
+Supported OpenStack Components
+------------------------------
+
+Tempest's :ref:`library` and :ref:`plugin interface <tempest_plugin>` can be
+leveraged to support integration testing for virtually any OpenStack component.
+
+However, Tempest only offers **in-tree** integration testing coverage for the
+following components:
+
+* Cinder
+* Glance
+* Keystone
+* Neutron
+* Nova
+* Swift
+
+Historically, Tempest offered in-tree testing for other components as well, but
+since the introduction of the `External Plugin Interface`_, Tempest's in-tree
+testing scope has been limited to the projects above. Integration tests for
+projects not included above should go into one of the
+`relevant plugin projects`_.
+
+.. _External Plugin Interface: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/tempest-external-plugin-interface.html
+.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugins/plugin-registry.html#detected-plugins
Exception Handling
------------------
@@ -431,7 +456,7 @@
by modifying Tempest's `lib installation script`_ for previous branches
(because DevStack is branched).
-.. _lib installation script: http://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/tempest
+.. _lib installation script: https://opendev.org/openstack/devstack/src/branch/master/lib/tempest
2. Bug fix on core project needing Tempest changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/README.rst b/README.rst
index 307ceb3..3cde2bf 100644
--- a/README.rst
+++ b/README.rst
@@ -10,274 +10,15 @@
Tempest - The OpenStack Integration Test Suite
==============================================
-The documentation for Tempest is officially hosted at:
-https://docs.openstack.org/tempest/latest/
-
This is a set of integration tests to be run against a live OpenStack
cluster. Tempest has batteries of tests for OpenStack API validation,
scenarios, and other specific tests useful in validating an OpenStack
deployment.
-Design Principles
------------------
-Tempest Design Principles that we strive to live by.
+ * Documentation: https://docs.openstack.org/tempest/latest/
+ * Features: https://specs.openstack.org/openstack/qa-specs/#tempest
+ * Bugs: https://bugs.launchpad.net/tempest/
+ * Release Notes: https://docs.openstack.org/releasenotes/tempest
-- Tempest should be able to run against any OpenStack cloud, be it a
- one node DevStack install, a 20 node LXC cloud, or a 1000 node KVM
- cloud.
-- Tempest should be explicit in testing features. It is easy to auto
- discover features of a cloud incorrectly, and give people an
- incorrect assessment of their cloud. Explicit is always better.
-- Tempest uses OpenStack public interfaces. Tests in Tempest should
- only touch public OpenStack APIs.
-- Tempest should not touch private or implementation specific
- interfaces. This means not directly going to the database, not
- directly hitting the hypervisors, not testing extensions not
- included in the OpenStack base. If there are some features of
- OpenStack that are not verifiable through standard interfaces, this
- should be considered a possible enhancement.
-- Tempest strives for complete coverage of the OpenStack API and
- common scenarios that demonstrate a working cloud.
-- Tempest drives load in an OpenStack cloud. By including a broad
- array of API and scenario tests Tempest can be reused in whole or in
- parts as load generation for an OpenStack cloud.
-- Tempest should attempt to clean up after itself, whenever possible
- we should tear down resources when done.
-- Tempest should be self-testing.
-
-Quickstart
-----------
-
-To run Tempest, you first need to create a configuration file that will tell
-Tempest where to find the various OpenStack services and other testing behavior
-switches. Where the configuration file lives and how you interact with it
-depends on how you'll be running Tempest. There are 2 methods of using Tempest.
-The first, which is a newer and recommended workflow treats Tempest as a system
-installed program. The second older method is to run Tempest assuming your
-working dir is the actually Tempest source repo, and there are a number of
-assumptions related to that. For this section we'll only cover the newer method
-as it is simpler, and quicker to work with.
-
-#. You first need to install Tempest. This is done with pip after you check out
- the Tempest repo::
-
- $ git clone https://git.openstack.org/openstack/tempest
- $ pip install tempest/
-
- This can be done within a venv, but the assumption for this guide is that
- the Tempest CLI entry point will be in your shell's PATH.
-
-#. Installing Tempest may create a ``/etc/tempest dir``, however if one isn't
- created you can create one or use ``~/.tempest/etc`` or ``~/.config/tempest`` in
- place of ``/etc/tempest``. If none of these dirs are created Tempest will create
- ``~/.tempest/etc`` when it's needed. The contents of this dir will always
- automatically be copied to all ``etc/`` dirs in local workspaces as an initial
- setup step. So if there is any common configuration you'd like to be shared
- between local Tempest workspaces it's recommended that you pre-populate it
- before running ``tempest init``.
-
-#. Setup a local Tempest workspace. This is done by using the tempest init
- command::
-
- $ tempest init cloud-01
-
- which also works the same as::
-
- $ mkdir cloud-01 && cd cloud-01 && tempest init
-
- This will create a new directory for running a single Tempest configuration.
- If you'd like to run Tempest against multiple OpenStack deployments the idea
- is that you'll create a new working directory for each to maintain separate
- configuration files and local artifact storage for each.
-
-#. Then ``cd`` into the newly created working dir and also modify the local
- config files located in the ``etc/`` subdir created by the ``tempest init``
- command. Tempest is expecting a ``tempest.conf`` file in etc/ so if only a
- sample exists you must rename or copy it to tempest.conf before making
- any changes to it otherwise Tempest will not know how to load it. For
- details on configuring Tempest refer to the
- `Tempest Configuration <https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
-
-#. Once the configuration is done you're now ready to run Tempest. This can
- be done using the `Tempest Run <https://docs.openstack.org/tempest/latest/run.html#tempest-run>`_
- command. This can be done by either
- running::
-
- $ tempest run
-
- from the Tempest workspace directory. Or you can use the ``--workspace``
- argument to run in the workspace you created regardless of your current
- working directory. For example::
-
- $ tempest run --workspace cloud-01
-
- There is also the option to use `stestr`_ directly. For example, from
- the workspace dir run::
-
- $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
-
- will run the same set of tests as the default gate jobs. Or you can
- use `unittest`_ compatible test runners such as `testr`_, `pytest`_ etc.
-
-.. _unittest: https://docs.python.org/3/library/unittest.html
-.. _testr: https://testrepository.readthedocs.org/en/latest/MANUAL.html
-.. _stestr: https://stestr.readthedocs.org/en/latest/MANUAL.html
-.. _pytest: https://docs.pytest.org/en/latest/
-
-Library
--------
-Tempest exposes a library interface. This interface is a stable interface and
-should be backwards compatible (including backwards compatibility with the
-old tempest-lib package, with the exception of the import). If you plan to
-directly consume Tempest in your project you should only import code from the
-Tempest library interface, other pieces of Tempest do not have the same
-stable interface and there are no guarantees on the Python API unless otherwise
-stated.
-
-For more details refer to the `library documentation
-<https://docs.openstack.org/tempest/latest/library.html#library>`_
-
-Release Versioning
-------------------
-`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
-shows what changes have been released on each version.
-
-Tempest's released versions are broken into 2 sets of information. Depending on
-how you intend to consume Tempest you might need
-
-The version is a set of 3 numbers:
-
-X.Y.Z
-
-While this is almost `semver`_ like, the way versioning is handled is slightly
-different:
-
-X is used to represent the supported OpenStack releases for Tempest tests
-in-tree, and to signify major feature changes to Tempest. It's a monotonically
-increasing integer where each version either indicates a new supported OpenStack
-release, the drop of support for an OpenStack release (which will coincide with
-the upstream stable branch going EOL), or a major feature lands (or is removed)
-from Tempest.
-
-Y.Z is used to represent library interface changes. This is treated the same
-way as minor and patch versions from `semver`_ but only for the library
-interface. When Y is incremented we've added functionality to the library
-interface and when Z is incremented it's a bug fix release for the library.
-Also note that both Y and Z are reset to 0 at each increment of X.
-
-.. _semver: http://semver.org/
-
-Configuration
--------------
-
-Detailed configuration of Tempest is beyond the scope of this
-document, see `Tempest Configuration Documentation
-<https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
-for more details on configuring Tempest.
-The ``etc/tempest.conf.sample`` attempts to be a self-documenting
-version of the configuration.
-
-You can generate a new sample tempest.conf file, run the following
-command from the top level of the Tempest directory::
-
- $ tox -e genconfig
-
-The most important pieces that are needed are the user ids, OpenStack
-endpoints, and basic flavors and images needed to run tests.
-
-Unit Tests
-----------
-
-Tempest also has a set of unit tests which test the Tempest code itself. These
-tests can be run by specifying the test discovery path::
-
- $ stestr --test-path ./tempest/tests run
-
-By setting ``--test-path`` option to ./tempest/tests it specifies that test discover
-should only be run on the unit test directory. The default value of ``test_path``
-is ``test_path=./tempest/test_discover`` which will only run test discover on the
-Tempest suite.
-
-Alternatively, there are the py27 and py36 tox jobs which will run the unit
-tests with the corresponding version of python.
-
-One common activity is to just run a single test, you can do this with tox
-simply by specifying to just run py27 or py36 tests against a single test::
-
- $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
-
-Or all tests in the test_microversions.py file::
-
- $ tox -e py36 -- -n tempest.tests.test_microversions
-
-You may also use regular expressions to run any matching tests::
-
- $ tox -e py36 -- test_microversions
-
-Additionally, when running a single test, or test-file, the ``-n/--no-discover``
-argument is no longer required, however it may perform faster if included.
-
-For more information on these options and details about stestr, please see the
-`stestr documentation <http://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
-
-Python 3.x
-----------
-
-Starting during the Pike cycle Tempest has a gating CI job that runs Tempest
-with Python 3. Any Tempest release after 15.0.0 should fully support running
-under Python 3 as well as Python 2.7.
-
-Legacy run method
------------------
-
-The legacy method of running Tempest is to just treat the Tempest source code
-as a python unittest repository and run directly from the source repo. When
-running in this way you still start with a Tempest config file and the steps
-are basically the same except that it expects you know where the Tempest code
-lives on your system and requires a bit more manual interaction to get Tempest
-running. For example, when running Tempest this way things like a lock file
-directory do not get generated automatically and the burden is on the user to
-create and configure that.
-
-To start you need to create a configuration file. The easiest way to create a
-configuration file is to generate a sample in the ``etc/`` directory ::
-
- $ cd $TEMPEST_ROOT_DIR
- $ oslo-config-generator --config-file \
- tempest/cmd/config-generator.tempest.conf \
- --output-file etc/tempest.conf
-
-After that, open up the ``etc/tempest.conf`` file and edit the
-configuration variables to match valid data in your environment.
-This includes your Keystone endpoint, a valid user and credentials,
-and reference data to be used in testing.
-
-.. note::
-
- If you have a running DevStack environment, Tempest will be
- automatically configured and placed in ``/opt/stack/tempest``. It
- will have a configuration file already set up to work with your
- DevStack installation.
-
-Tempest is not tied to any single test runner, but `testr`_ is the most commonly
-used tool. Also, the nosetests test runner is **not** recommended to run Tempest.
-
-After setting up your configuration file, you can execute the set of Tempest
-tests by using ``testr`` ::
-
- $ testr run --parallel
-
-To run one single test serially ::
-
- $ testr run tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
-
-Tox also contains several existing job configurations. For example::
-
- $ tox -e full
-
-which will run the same set of tests as the OpenStack gate. (it's exactly how
-the gate invokes Tempest) Or::
-
- $ tox -e smoke
-
-to run the tests tagged as smoke.
+Get in touch via `email <mailto:openstack-discuss@lists.openstack.org>`_. Use
+[tempest] in your subject.
diff --git a/REVIEWING.rst b/REVIEWING.rst
index bf63ed2..e07e358 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -159,8 +159,35 @@
When to approve
---------------
-* Every patch needs two +2s before being approved.
-* It's ok to hold off on an approval until a subject matter expert reviews it
-* If a patch has already been approved but requires a trivial rebase to merge,
- you do not have to wait for a second +2, since the patch has already had
- two +2s.
+* It's OK to hold off on an approval until a subject matter expert reviews it.
+* Every patch needs two +2's before being approved.
+* However, a single Tempest core reviewer can approve patches without waiting
+ for another +2 in the following cases:
+
+ * If a patch has already been approved but requires a trivial rebase to
+ merge, then there is no need to wait for a second +2, since the patch has
+ already had two +2's.
+ * If any trivial patch set fixes one of the items below:
+
+ * Documentation or code comment typo
+ * Documentation ref link
+ * Example: `example`_
+
+ .. note::
+
+ Any other small documentation, CI job, or code change does not fall under
+ this category.
+
+ * If the patch **unblocks** a failing project gate, provided that:
+
+ * the project's PTL +1's the change
+ * the patch does not affect any other project's testing gates
+ * the patch does not cause any negative side effects
+    * If the patch fixes or removes a faulty plugin (one that causes the
+      voting ``tempest-tox-plugin-sanity-check`` job to fail) and unblocks
+      the Tempest gate
+
+ Note that such a policy should be used judiciously, as we should strive to
+ have two +2's on each patch set, prior to approval.
+
+.. _example: https://review.opendev.org/#/c/611032/
diff --git a/doc/requirements.txt b/doc/requirements.txt
index d959d44..30394e8 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,6 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-openstackdocstheme>=1.18.1 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+openstackdocstheme>=2.2.0 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 7745594..5422af7 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1 +1,4 @@
redirectmatch 301 ^/developer/tempest/(.*) /tempest/latest/$1
+redirectmatch 301 ^/tempest/latest/plugin.html /tempest/latest/plugins/plugin.html
+redirectmatch 301 ^/tempest/latest/plugin-registry.html /tempest/latest/plugins/plugin-registry
+redirectmatch 301 ^/tempest/latest/#support-policy /tempest/latest/#stable-branch-support-policy
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c2ea628..dd7c544 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -52,6 +52,7 @@
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
+ 'sphinxcontrib.rsvgconverter',
'openstackdocstheme',
'oslo_config.sphinxconfiggen',
]
@@ -62,12 +63,10 @@
todo_include_todos = True
# openstackdocstheme options
-repository_name = 'openstack/tempest'
-bug_project = 'tempest'
-bug_tag = 'doc'
-
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/tempest'
+openstackdocs_bug_project = 'tempest'
+openstackdocs_bug_tag = 'doc'
+openstackdocs_pdf_link = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -82,7 +81,6 @@
master_doc = 'index'
# General information about the project.
-project = u'Tempest'
copyright = u'2013, OpenStack QA Team'
# The language for content autogenerated by Sphinx. Refer to documentation
@@ -196,3 +194,16 @@
# A list of warning types to suppress arbitrary warning messages.
suppress_warnings = ['image.nonlocal_uri']
+
+# -- Options for LaTeX output -------------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+ ('index', 'doc-tempest.tex', u'Tempest Testing Project',
+ u'OpenStack Foundation', 'manual'),
+]
+
+# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
+latex_use_xindy = False
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 2e5f706..36828e0 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -108,12 +108,12 @@
insufficient for certain use cases because of the admin credentials requirement
to create the credential sets on demand. To get around that the accounts.yaml
file was introduced and with that a new internal credential provider to enable
-using the list of credentials instead of creating them on demand. With locking
-test accounts each test class will reserve a set of credentials from the
-accounts.yaml before executing any of its tests so that each class is isolated
-like with dynamic credentials.
+using the list of credentials instead of creating them on demand. With
+pre-provisioned credentials (also known as locking test accounts) each test
+class will reserve a set of credentials from the accounts.yaml before executing
+any of its tests so that each class is isolated like with dynamic credentials.
-To enable and use locking test accounts you need do a few things:
+To enable and use pre-provisioned credentials you need to do a few things:
#. Create an accounts.yaml file which contains the set of pre-existing
credentials to use for testing. To make sure you don't have a credentials
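
As an aside to the pre-provisioned credentials change above, a minimal
accounts.yaml sketch (all names and values are placeholders, and the exact
fields depend on your deployment) could look like this::

    # Hypothetical accounts.yaml entries for pre-provisioned credentials.
    - username: 'user-1'
      project_name: 'project-1'
      password: 'example-password'
    - username: 'user-2'
      project_name: 'project-2'
      password: 'example-password'
      roles:
        - 'member'
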
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..9c79a1f
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,59 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the more project-specific information you need to get
+started with Tempest.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` on FreeNode
+* Mailing list (prefix subjects with ``[qa]`` for faster responses)
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Tempest Core Team
+<https://review.opendev.org/#/admin/groups/42,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Tempest features are tracked on `Launchpad BP <https://blueprints.launchpad.net/tempest>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/tempest>`_.
+
+If you're looking for a smaller, easier work item to pick up and get started
+on, search for the 'low-hanging-fruit' tag.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad <https://bugs.launchpad.net/tempest/+filebug>`__.
+More info about Launchpad usage can be found on the `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Tempest require two ``Code-Review +2`` votes from
+Tempest core reviewers before one of the core reviewers can approve the patch
+by giving a ``Workflow +1`` vote. More detailed guidelines for reviewers are available
+at :doc:`../REVIEWING`.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/data/tempest-blacklisted-plugins-registry.header b/doc/source/data/tempest-blacklisted-plugins-registry.header
new file mode 100644
index 0000000..6b6af11
--- /dev/null
+++ b/doc/source/data/tempest-blacklisted-plugins-registry.header
@@ -0,0 +1,7 @@
+Blacklisted Plugins
+===================
+
+This is a list of Tempest plugin projects that have been stale or unmaintained
+for a long time (6 months or more). They can be moved out of the blacklisted
+state once one of the relevant patches gets merged:
+https://review.opendev.org/#/q/topic:tempest-sanity-gate+%28status:open%29
diff --git a/doc/source/data/tempest-plugins-registry.header b/doc/source/data/tempest-plugins-registry.header
index 0de12b7..831d8a6 100644
--- a/doc/source/data/tempest-plugins-registry.header
+++ b/doc/source/data/tempest-plugins-registry.header
@@ -3,9 +3,9 @@
job. You should edit the files data/tempest-plugins-registry.footer
and data/tempest-plugins-registry.header instead of this one.
-==========================
- Tempest Plugin Registry
-==========================
+=======================
+Tempest Plugin Registry
+=======================
Since we've created the external plugin mechanism, it's gotten used by
a lot of projects. The following is a list of plugins that currently
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fecf98a..f878888 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -48,6 +48,24 @@
workspace
run
+Supported OpenStack Releases and Python Versions
+------------------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ supported_version
+
+For Contributors
+================
+
+* If you are a new contributor to Tempest please refer to :doc:`contributor/contributing`.
+
+.. toctree::
+ :hidden:
+
+ contributor/contributing
+
Developers Guide
================
@@ -69,8 +87,31 @@
.. toctree::
:maxdepth: 2
- plugin
- plugin-registry
+ plugins/index
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ tempest_and_plugins_compatible_version_policy
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_testing_policy
Library
-------
@@ -80,15 +121,12 @@
library
-Support Policy
---------------
+Search
+======
-.. toctree::
- :maxdepth: 2
+.. only:: html
- stable_branch_support_policy
+ * :ref:`Tempest document search <search>`: Search the contents of this document.
-Indices and tables
-==================
-
-* :ref:`search`
+* `OpenStack wide search <https://docs.openstack.org>`_: Search the wider
+ set of OpenStack documentation, including forums.
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 983fa24..5bc0eac 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -338,8 +338,8 @@
.. _2.26: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id23
-* `2.28`_
-
+ * `2.28`_
+
.. _2.28: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id25
* `2.32`_
@@ -394,6 +394,10 @@
.. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
+ * `2.59`_
+
+ .. _2.59: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+
* `2.60`_
.. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-queens
@@ -406,6 +410,18 @@
.. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
+ * `2.70`_
+
+ .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id63
+
+ * `2.71`_
+
+ .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+
+ * `2.73`_
+
+ .. _2.73: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id66
+
* Volume
* `3.3`_
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
deleted file mode 120000
index c768ff7..0000000
--- a/doc/source/overview.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../README.rst
\ No newline at end of file
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
new file mode 100644
index 0000000..e51b90b
--- /dev/null
+++ b/doc/source/overview.rst
@@ -0,0 +1,281 @@
+Tempest - The OpenStack Integration Test Suite
+==============================================
+
+The documentation for Tempest is officially hosted at:
+https://docs.openstack.org/tempest/latest/
+
+This is a set of integration tests to be run against a live OpenStack
+cluster. Tempest has batteries of tests for OpenStack API validation,
+scenarios, and other specific tests useful in validating an OpenStack
+deployment.
+
+Team and repository tags
+------------------------
+
+.. image:: https://governance.openstack.org/tc/badges/tempest.svg
+ :target: https://governance.openstack.org/tc/reference/tags/index.html
+
+.. Change things from this point on
+
+Design Principles
+-----------------
+Tempest Design Principles that we strive to live by.
+
+- Tempest should be able to run against any OpenStack cloud, be it a
+ one node DevStack install, a 20 node LXC cloud, or a 1000 node KVM
+ cloud.
+- Tempest should be explicit in testing features. It is easy to auto
+ discover features of a cloud incorrectly, and give people an
+ incorrect assessment of their cloud. Explicit is always better.
+- Tempest uses OpenStack public interfaces. Tests in Tempest should
+ only touch public OpenStack APIs.
+- Tempest should not touch private or implementation specific
+ interfaces. This means not directly going to the database, not
+ directly hitting the hypervisors, not testing extensions not
+ included in the OpenStack base. If there are some features of
+ OpenStack that are not verifiable through standard interfaces, this
+ should be considered a possible enhancement.
+- Tempest strives for complete coverage of the OpenStack API and
+ common scenarios that demonstrate a working cloud.
+- Tempest drives load in an OpenStack cloud. By including a broad
+ array of API and scenario tests Tempest can be reused in whole or in
+ parts as load generation for an OpenStack cloud.
+- Tempest should attempt to clean up after itself, whenever possible
+ we should tear down resources when done.
+- Tempest should be self-testing.
+
+Quickstart
+----------
+
+To run Tempest, you first need to create a configuration file that will tell
+Tempest where to find the various OpenStack services and other testing behavior
+switches. Where the configuration file lives and how you interact with it
+depends on how you'll be running Tempest. There are 2 methods of using Tempest.
+The first, which is a newer and recommended workflow, treats Tempest as a
+system-installed program. The second, older method is to run Tempest assuming
+your working dir is the actual Tempest source repo, and there are a number of
+assumptions related to that. For this section we'll only cover the newer method
+as it is simpler and quicker to work with.
+
+#. You first need to install Tempest. This is done with pip after you check out
+ the Tempest repo::
+
+ $ git clone https://opendev.org/openstack/tempest
+ $ pip install tempest/
+
+ This can be done within a venv, but the assumption for this guide is that
+ the Tempest CLI entry point will be in your shell's PATH.
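+
+   For example, one hedged venv-based install (the directory name is
+   arbitrary) could look like::
+
+     $ python3 -m venv tempest-venv
+     $ source tempest-venv/bin/activate
+     $ pip install tempest/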
+
+#. Installing Tempest may create a ``/etc/tempest`` dir; however, if one isn't
+   created you can create one or use ``~/.tempest/etc`` or ``~/.config/tempest`` in
+   place of ``/etc/tempest``. If none of these dirs are created, Tempest will create
+ ``~/.tempest/etc`` when it's needed. The contents of this dir will always
+ automatically be copied to all ``etc/`` dirs in local workspaces as an initial
+ setup step. So if there is any common configuration you'd like to be shared
+ between local Tempest workspaces it's recommended that you pre-populate it
+ before running ``tempest init``.
+
+#. Setup a local Tempest workspace. This is done by using the tempest init
+ command::
+
+ $ tempest init cloud-01
+
+ which also works the same as::
+
+ $ mkdir cloud-01 && cd cloud-01 && tempest init
+
+ This will create a new directory for running a single Tempest configuration.
+ If you'd like to run Tempest against multiple OpenStack deployments the idea
+ is that you'll create a new working directory for each to maintain separate
+ configuration files and local artifact storage for each.
+
+#. Then ``cd`` into the newly created working dir and also modify the local
+ config files located in the ``etc/`` subdir created by the ``tempest init``
+   command. Tempest expects a ``tempest.conf`` file in ``etc/``, so if only a
+   sample exists you must rename or copy it to ``tempest.conf`` before making
+   any changes to it; otherwise Tempest will not know how to load it. For
+   details on configuring Tempest refer to the
+   `Tempest Configuration <https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_.
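+
+   For example, assuming ``tempest init`` left only the sample file in the
+   ``etc/`` subdir (the exact sample filename can differ per deployment, so
+   treat this as an illustrative sketch)::
+
+     $ cp etc/tempest.conf.sample etc/tempest.conf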
+
+#. Once the configuration is done you're now ready to run Tempest. This is
+   done using the `Tempest Run <https://docs.openstack.org/tempest/latest/run.html#tempest-run>`_
+   command, either by running::
+
+ $ tempest run
+
+ from the Tempest workspace directory. Or you can use the ``--workspace``
+ argument to run in the workspace you created regardless of your current
+ working directory. For example::
+
+ $ tempest run --workspace cloud-01
+
+ There is also the option to use `stestr`_ directly. For example, from
+ the workspace dir run::
+
+ $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
+
+   which will run the same set of tests as the default gate jobs. Or you can
+   use other `unittest`_ compatible test runners, such as `pytest`_, etc.
+
+ Tox also contains several existing job configurations. For example::
+
+ $ tox -e full
+
+   which will run the same set of tests as the OpenStack gate (it's exactly how
+   the gate invokes Tempest). Or::
+
+ $ tox -e smoke
+
+ to run the tests tagged as smoke.
+
+.. _unittest: https://docs.python.org/3/library/unittest.html
+.. _stestr: https://stestr.readthedocs.org/en/latest/MANUAL.html
+.. _pytest: https://docs.pytest.org/en/latest/
+
+Library
+-------
+Tempest exposes a library interface. This interface is a stable interface and
+should be backwards compatible (including backwards compatibility with the
+old tempest-lib package, with the exception of the import). If you plan to
+directly consume Tempest in your project you should only import code from the
+Tempest library interface; other pieces of Tempest do not have the same
+stable interface and there are no guarantees on the Python API unless otherwise
+stated.
+
+For more details refer to the `library documentation
+<https://docs.openstack.org/tempest/latest/library.html#library>`_.
+
+Release Versioning
+------------------
+`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
+shows what changes have been released on each version.
+
+Tempest's released versions are broken into 2 sets of information. Depending on
+how you intend to consume Tempest you might need one or both of them.
+
+The version is a set of 3 numbers:
+
+X.Y.Z
+
+While this is almost `semver`_ like, the way versioning is handled is slightly
+different:
+
+X is used to represent the supported OpenStack releases for Tempest tests
+in-tree, and to signify major feature changes to Tempest. It's a monotonically
+increasing integer where each version either indicates a new supported OpenStack
+release, the drop of support for an OpenStack release (which will coincide with
+the upstream stable branch going EOL), or that a major feature has landed in
+(or been removed from) Tempest.
+
+Y.Z is used to represent library interface changes. This is treated the same
+way as minor and patch versions from `semver`_ but only for the library
+interface. When Y is incremented we've added functionality to the library
+interface and when Z is incremented it's a bug fix release for the library.
+Also note that both Y and Z are reset to 0 at each increment of X.
+
+.. _semver: https://semver.org/
+
+Configuration
+-------------
+
+Detailed configuration of Tempest is beyond the scope of this
+document, see `Tempest Configuration Documentation
+<https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
+for more details on configuring Tempest.
+The ``etc/tempest.conf.sample`` attempts to be a self-documenting
+version of the configuration.
+
+To generate a new sample tempest.conf file, run the following
+command from the top level of the Tempest directory::
+
+ $ tox -e genconfig
+
+The most important pieces that are needed are the user ids, OpenStack
+endpoints, and basic flavors and images needed to run tests.
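+
+As an illustrative sketch only (the values below are placeholders and the
+authoritative option names for your release are in the generated sample file),
+a minimal configuration tends to look something like::
+
+  [auth]
+  admin_username = admin
+  admin_password = <admin password>
+  admin_project_name = admin
+  admin_domain_name = Default
+
+  [identity]
+  uri_v3 = http://<keystone host>/identity/v3
+
+  [compute]
+  image_ref = <image uuid>
+  flavor_ref = <flavor id>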
+
+Unit Tests
+----------
+
+Tempest also has a set of unit tests which test the Tempest code itself. These
+tests can be run by specifying the test discovery path::
+
+ $ stestr --test-path ./tempest/tests run
+
+Setting the ``--test-path`` option to ``./tempest/tests`` specifies that test
+discovery should only be run on the unit test directory. The default value of
+``test_path`` is ``test_path=./tempest/test_discover``, which will only run test
+discovery on the Tempest suite.
+
+Alternatively, there are the py27 and py36 tox jobs which will run the unit
+tests with the corresponding version of python.
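+
+For instance, to run the whole unit test suite under Python 3.6 (assuming the
+``py36`` tox environment is available in your checkout)::
+
+  $ tox -e py36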
+
+One common activity is to just run a single test; you can do this with tox
+simply by telling the py27 or py36 environment to run a single test::
+
+ $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+
+Or all tests in the test_microversions.py file::
+
+ $ tox -e py36 -- -n tempest.tests.test_microversions
+
+You may also use regular expressions to run any matching tests::
+
+ $ tox -e py36 -- test_microversions
+
+Additionally, when running a single test, or test-file, the ``-n/--no-discover``
+argument is no longer required; however, the run may be faster if it is included.
+
+For more information on these options and details about stestr, please see the
+`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+
+Python 3.x
+----------
+
+Starting during the Pike cycle, Tempest has had a gating CI job that runs Tempest
+with Python 3. Any Tempest release after 15.0.0 should fully support running
+under Python 3 as well as Python 2.7.
+
+Legacy run method
+-----------------
+
+The legacy method of running Tempest is to just treat the Tempest source code
+as a python unittest repository and run directly from the source repo. When
+running in this way you still start with a Tempest config file and the steps
+are basically the same except that it expects you to know where the Tempest code
+lives on your system and requires a bit more manual interaction to get Tempest
+running. For example, when running Tempest this way things like a lock file
+directory do not get generated automatically and the burden is on the user to
+create and configure that.
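+
+As a hedged illustration of that manual step, creating the lock file directory
+yourself and pointing Tempest's lock path configuration at it (commonly the
+``[oslo_concurrency] lock_path`` option, but check the generated sample for
+your version) can be as simple as::
+
+  $ mkdir -p /tmp/tempest-lock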
+
+To start you need to create a configuration file. The easiest way to create a
+configuration file is to generate a sample in the ``etc/`` directory ::
+
+ $ cd $TEMPEST_ROOT_DIR
+ $ oslo-config-generator --config-file \
+ tempest/cmd/config-generator.tempest.conf \
+ --output-file etc/tempest.conf
+
+After that, open up the ``etc/tempest.conf`` file and edit the
+configuration variables to match valid data in your environment.
+This includes your Keystone endpoint, a valid user and credentials,
+and reference data to be used in testing.
+
+.. note::
+
+ If you have a running DevStack environment, Tempest will be
+ automatically configured and placed in ``/opt/stack/tempest``. It
+ will have a configuration file already set up to work with your
+ DevStack installation.
+
+Tempest is not tied to any single test runner, but `stestr`_ is the most commonly
+used tool. Also, the nosetests test runner is **not** recommended for running Tempest.
+
+After setting up your configuration file, you can execute the set of Tempest
+tests by using ``stestr``. By default, ``stestr`` runs tests in parallel ::
+
+ $ stestr run
+
+To run one single test serially ::
+
+ $ stestr run --serial tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
diff --git a/doc/source/plugins/index.rst b/doc/source/plugins/index.rst
new file mode 100644
index 0000000..f961ac7
--- /dev/null
+++ b/doc/source/plugins/index.rst
@@ -0,0 +1,40 @@
+=====================
+Tempest Plugins Guide
+=====================
+
+.. toctree::
+ :maxdepth: 2
+
+ plugin
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../stable_branch_testing_policy
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../tempest_and_plugins_compatible_version_policy
+
+Plugins Registry
+----------------
+
+.. toctree::
+ :maxdepth: 2
+
+ plugin-registry
diff --git a/doc/source/plugin.rst b/doc/source/plugins/plugin.rst
similarity index 97%
rename from doc/source/plugin.rst
rename to doc/source/plugins/plugin.rst
index 9958792..a9e2059 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -43,7 +43,7 @@
In order to create the basic structure with base classes and test directories
you can use the tempest-plugin-cookiecutter project::
- > pip install -U cookiecutter && cookiecutter https://git.openstack.org/openstack/tempest-plugin-cookiecutter
+ > pip install -U cookiecutter && cookiecutter https://opendev.org/openstack/tempest-plugin-cookiecutter
Cloning into 'tempest-plugin-cookiecutter'...
remote: Counting objects: 17, done.
@@ -96,7 +96,7 @@
that users don't have to worry about inadvertently installing a Tempest plugin
when they install another package.
-.. _Branchless Tempest Spec: http://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
+.. _Branchless Tempest Spec: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
The sole advantage to integrating a plugin into an existing python project is
that it enables you to land code changes at the same time you land test changes
diff --git a/doc/source/stable_branch_testing_policy.rst b/doc/source/stable_branch_testing_policy.rst
new file mode 100644
index 0000000..02c5338
--- /dev/null
+++ b/doc/source/stable_branch_testing_policy.rst
@@ -0,0 +1,33 @@
+Stable Branch Testing Policy
+============================
+
+Tempest and its plugins need to support the stable branches
+as per :doc:`Stable Branch Support Policy </stable_branch_support_policy>`.
+
+Because of the branchless model of Tempest and its plugins, all the supported
+stable branches use the master version of Tempest and the plugins for their
+testing. That is done in devstack by using the `master branch
+<https://opendev.org/openstack/devstack/src/commit/c104afec7dd72edfd909847bee9c14eaf077a28b/stackrc#L314>`_
+for the Tempest installation. To make sure the master version of Tempest or
+a plugin (for any change or new test) remains compatible with all
+the supported stable branches, Tempest and its plugins need to
+add the stable branch jobs to the master gate. That way the stable
+branches are tested against master code and supported branches are not broken
+accidentally.
+
+Example:
+
+* `Stable jobs on Tempest master
+ <https://opendev.org/openstack/tempest/src/commit/e8f1876aa6772077f85f380677b30251c2454505/.zuul.yaml#L646-L651>`_.
+
+* `Stable job on neutron tempest plugins
+ <https://opendev.org/openstack/neutron-tempest-plugin/src/commit/4bc1b00213cf660648cad1916fe6497ac29b2e78/.zuul.yaml#L1427-L1428>`_
+
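+In practice such an addition is a small project stanza in the repository's
+``.zuul.yaml``. A hedged sketch (the job name below is illustrative only, not
+taken from either repository)::
+
+  - project:
+      check:
+        jobs:
+          - tempest-full-train
+      gate:
+        jobs:
+          - tempest-full-train
+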
+Once any stable branch is moved to the `Extended Maintenance Phases`_
+and devstack starts using an older Tempest version for that stable
+branch's testing, we can remove that stable branch job from the master
+gate.
+
+Example: https://review.opendev.org/#/c/722183/
+
+.. _Extended Maintenance Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#extended-maintenance
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
new file mode 100644
index 0000000..388b4cd
--- /dev/null
+++ b/doc/source/supported_version.rst
@@ -0,0 +1,36 @@
+Supported OpenStack Releases and Python Versions
+================================================
+
+This document lists the officially supported OpenStack releases
+and python versions by Tempest.
+
+Compatible OpenStack Releases
+-----------------------------
+
+Tempest master supports the following OpenStack releases:
+
+* Ussuri
+* Train
+* Stein
+
+For older OpenStack releases:
+
+For any OpenStack release older than those listed above, Tempest master might work. But if
+Tempest master starts failing then you can use the respective Tempest tag listed on the OpenStack
+releases page.
+
+For example: OpenStack Stein: Tempest 20.0.0
+
+* https://releases.openstack.org/stein/index.html#stein-tempest
+
+How to use Tempest tag on Extended Maintenance stable branch:
+
+* https://review.opendev.org/#/c/705098/
+
+Supported Python Versions
+-------------------------
+
+Tempest master supports the following Python versions:
+
+* Python 3.6
+* Python 3.7
diff --git a/doc/source/tempest_and_plugins_compatible_version_policy.rst b/doc/source/tempest_and_plugins_compatible_version_policy.rst
new file mode 100644
index 0000000..942b1bd
--- /dev/null
+++ b/doc/source/tempest_and_plugins_compatible_version_policy.rst
@@ -0,0 +1,54 @@
+Tempest and Plugins compatible version policy
+=============================================
+
+Tempest and its plugins are responsible for the integrated
+testing of OpenStack. These tools have two use cases:
+
+#. Testing upstream code at gate
+#. Testing Production Cloud
+
+Upstream code is tested by the master version of branchless Tempest & plugins
+for all supported stable branches in `Maintained phase`_.
+
+A production cloud can be tested by using the compatible version or the
+master version, depending on the testing strategy of the cloud. To provide
+a compatible version of Tempest and its plugins per OpenStack release,
+we started the coordinated release of all plugins and Tempest per OpenStack
+release.
+These versions are the first set of versions from Tempest and its Plugins to
+officially start the support of a particular OpenStack release. For example:
+OpenStack Train release first compatible versions `Tempest plugins version`_.
+
+Because of the branchless nature of Tempest and its plugins, the first version
+released during an OpenStack release is not the last version to support that
+OpenStack release. This means the next (or master) versions can also be used
+for upstream testing as well as in production testing.
+
+Since the `Extended Maintenance policy`_ for stable branches was introduced,
+Tempest started releasing the ``end of support`` version once a stable release
+is moved to the EM state, which used to happen at the EOL of a stable release.
+This is the last compatible version of Tempest for the OpenStack release moved to EM.
+
+Because of the branchless nature explained above, we have a range of versions
+which can be considered compatible versions for a particular OpenStack release.
+How we should release those versions is described in the table below.
+
+ +-----------------------------+-----------------+------------------------------------+
+ | First compatible version -> | OpenStack 'XYZ' | <- Last compatible version |
+ +=============================+=================+====================================+
+ |This is the latest version | |This is the version released |
+ |released when OpenStack | |when OpenStack 'XYZ' is moved to |
+ |'XYZ' is released. | |EM state. Hash used for this should |
+ |Example: | |be the hash from master at the time |
+ |`Tempest plugins version`_ | |of branch is EM not the one used for|
+ | | |First compatible version |
+ +-----------------------------+-----------------+------------------------------------+
+
+Tempest & the plugins should follow the above mentioned policy for the
+``First compatible version`` and the ``Last compatible version``
+so that we provide the right set of compatible versions for upstream as well as
+production cloud testing.
+
+.. _Maintained phase: https://docs.openstack.org/project-team-guide/stable-branches.html#maintained
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Tempest plugins version: https://releases.openstack.org/train/#tempest-plugins
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index e249bdd..ff4fa09 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -128,8 +128,9 @@
people to respond to removal proposals please add things to the agenda by the
Monday before the meeting.
-The other option is to raise the removal on the openstack-dev mailing list.
-(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html )
+The other option is to raise the removal on the openstack-discuss mailing list.
+(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html
+or http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003574.html )
This will raise the issue to the wider community and attract at least the same
(most likely more) attention than discussing it during the irc meeting. The
only downside is that it might take more time to get a response, given the
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
new file mode 100644
index 0000000..5f72345
--- /dev/null
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -0,0 +1,24 @@
+# Changes that run through devstack-tempest-ipv6 are likely to have an impact on
+# the devstack part of the job, so we keep devstack in the main play to
+# avoid zuul retrying on legitimate failures.
+- hosts: all
+ roles:
+ - orchestrate-devstack
+
+# We run tests only on one node, regardless how many nodes are in the system
+- hosts: tempest
+ environment:
+    # This environment variable is used by the optional tempest-gabbi
+ # job provided by the gabbi-tempest plugin. It can be safely ignored
+ # if that plugin is not being used.
+ GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
+ roles:
+ - setup-tempest-run-dir
+ - setup-tempest-data-dir
+ - acl-devstack-files
+    # Verify the IPv6-only deployment. This role checks the IPv6-only
+    # environment, for example the DevStack IPv6 settings and whether the
+    # services listen on IPv6 addresses. It is invoked before tests are run
+    # so that we can fail early if anything is missing from the IPv6 settings
+    # or deployment.
+ - ipv6-only-deployments-verification
+ - run-tempest
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 01155a8..7ee7411 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,8 +7,46 @@
# We run tests only on one node, regardless how many nodes are in the system
- hosts: tempest
- roles:
- - setup-tempest-run-dir
- - setup-tempest-data-dir
- - acl-devstack-files
- - run-tempest
+ environment:
+    # This environment variable is used by the optional tempest-gabbi
+ # job provided by the gabbi-tempest plugin. It can be safely ignored
+ # if that plugin is not being used.
+ GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
+ tasks:
+ - name: Setup Tempest Run Directory
+ include_role:
+ name: setup-tempest-run-dir
+
+ - name: Setup Tempest Data Directory
+ include_role:
+ name: setup-tempest-data-dir
+
+ - name: ACL devstack files
+ include_role:
+ name: acl-devstack-files
+
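+    # The tempest cleanup related tasks below are conditional: each one only
+    # runs when the relevant run_tempest_dry_cleanup / run_tempest_cleanup
+    # job variables are defined, otherwise it is skipped.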
+ - name: Run tempest cleanup init-saved-state
+ include_role:
+ name: tempest-cleanup
+ vars:
+ init_saved_state: true
+ when:
+ - run_tempest_dry_cleanup is defined
+ - run_tempest_cleanup is defined
+
+ - name: Run Tempest
+ include_role:
+ name: run-tempest
+
+ - name: Run tempest cleanup dry-run
+ include_role:
+ name: tempest-cleanup
+ vars:
+ dry_run: true
+ when:
+ - run_tempest_dry_cleanup is defined
+
+ - name: Run tempest cleanup
+ include_role:
+ name: tempest-cleanup
+ when: run_tempest_cleanup is defined
diff --git a/releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml b/releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
index 092014e..8787e0c 100644
--- a/releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
+++ b/releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
@@ -4,5 +4,5 @@
Adds subunit-describe-calls. A parser for subunit streams to determine what
REST API calls are made inside of a test and in what order they are called.
- * Input can be piped in or a file can be specified
- * Output is shortened for stdout, the output file has more information
+ * Input can be piped in or a file can be specified
+ * Output is shortened for stdout, the output file has more information
diff --git a/releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml b/releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
index f9173a0..b857db8 100644
--- a/releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
+++ b/releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
@@ -6,8 +6,8 @@
so the other projects can use these modules as stable libraries without
any maintenance changes.
- * endpoints_client(v2)
- * roles_client(v2)
- * services_client(v2)
- * tenants_client(v2)
- * users_client(v2)
+ * endpoints_client(v2)
+ * roles_client(v2)
+ * services_client(v2)
+ * tenants_client(v2)
+ * users_client(v2)
diff --git a/releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml b/releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
index 1fa4ddd..492aac8 100644
--- a/releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
+++ b/releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
@@ -6,10 +6,10 @@
so the other projects can use these modules as stable libraries
without any maintenance changes.
- * image_members_client(v1)
- * images_client(v1)
- * image_members_client(v2)
- * images_client(v2)
- * namespaces_client(v2)
- * resource_types_client(v2)
- * schemas_client(v2)
+ * image_members_client(v1)
+ * images_client(v1)
+ * image_members_client(v2)
+ * images_client(v2)
+ * namespaces_client(v2)
+ * resource_types_client(v2)
+ * schemas_client(v2)
diff --git a/releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml b/releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
index 3ec8b56..6e08743 100644
--- a/releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
+++ b/releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
@@ -6,8 +6,8 @@
so the other projects can use these modules as stable libraries without
any maintenance changes.
- * endpoints_client(v3)
- * policies_client (v3)
- * regions_client(v3)
- * services_client(v3)
- * projects_client(v3)
+ * endpoints_client(v3)
+ * policies_client (v3)
+ * regions_client(v3)
+ * services_client(v3)
+ * projects_client(v3)
diff --git a/releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml b/releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
index cf504ad..32cd182 100644
--- a/releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
+++ b/releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
@@ -6,13 +6,13 @@
so the other projects can use these modules as stable libraries
without any maintenance changes.
- * availability_zone_client(v1)
- * availability_zone_client(v2)
- * extensions_client(v1)
- * extensions_client(v2)
- * hosts_client(v1)
- * hosts_client(v2)
- * quotas_client(v1)
- * quotas_client(v2)
- * services_client(v1)
- * services_client(v2)
+ * availability_zone_client(v1)
+ * availability_zone_client(v2)
+ * extensions_client(v1)
+ * extensions_client(v2)
+ * hosts_client(v1)
+ * hosts_client(v2)
+ * quotas_client(v1)
+ * quotas_client(v2)
+ * services_client(v1)
+ * services_client(v2)
diff --git a/releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml b/releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
index 9e828f6..a0de2c3 100644
--- a/releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
+++ b/releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
@@ -5,11 +5,11 @@
Add new service clients to the library interface so the other projects can use these modules as stable libraries without
any maintenance changes.
- * identity_client(v2)
- * groups_client(v3)
- * trusts_client(v3)
- * users_client(v3)
- * identity_client(v3)
- * roles_client(v3)
- * inherited_roles_client(v3)
- * credentials_client(v3)
+ * identity_client(v2)
+ * groups_client(v3)
+ * trusts_client(v3)
+ * users_client(v3)
+ * identity_client(v3)
+ * roles_client(v3)
+ * inherited_roles_client(v3)
+ * credentials_client(v3)
diff --git a/releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml b/releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml
index 7e40fd4..6346156 100644
--- a/releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml
+++ b/releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml
@@ -5,5 +5,5 @@
http://developer.openstack.org/api-ref/image/v2/metadefs-index.html,
there are some apis are not included, add them.
- * namespace_properties_client(v2)
+ * namespace_properties_client(v2)
diff --git a/releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
index eaab1f0..a4e5791 100644
--- a/releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
+++ b/releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
@@ -5,6 +5,6 @@
http://developer.openstack.org/api-ref/image/v2/metadefs-index.html,
there are some apis are not included, add them.
- * namespace_objects_client(v2)
- * namespace_tags_client(v2)
+ * namespace_objects_client(v2)
+ * namespace_tags_client(v2)
diff --git a/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml b/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml
new file mode 100644
index 0000000..e2fc5b3
--- /dev/null
+++ b/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    The tempest cleanup CLI is extended to cover region deletion. Until now,
+ regions have been neglected by tempest cleanup. From now on, tempest
+ cleanup is able to delete leftover regions as well.
diff --git a/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
new file mode 100644
index 0000000..21b74a6
--- /dev/null
+++ b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+    Add a basic read-only Placement client to Tempest to make testing of the
+    placement based bandwidth allocation feature possible.
+    The following API calls are available for tempest from now on:
+
+ * GET /allocation_candidates
+ * GET /allocations/{consumer_uuid}
+
+ Add new config group ``placement``, with the config options:
+
+ * ``endpoint_type`` to use for communication with placement service.
+ * ``catalog_type`` of the placement service.
+ * ``region`` as the placement region name to use.
+    * ``min_microversion`` and ``max_microversion`` as the microversion range
+      within which placement API requests are sent.
diff --git a/releasenotes/notes/QoS-client-for-placement-based-minimum-bw-allocation-8e5854d5754cec68.yaml b/releasenotes/notes/QoS-client-for-placement-based-minimum-bw-allocation-8e5854d5754cec68.yaml
new file mode 100644
index 0000000..b66ea3a
--- /dev/null
+++ b/releasenotes/notes/QoS-client-for-placement-based-minimum-bw-allocation-8e5854d5754cec68.yaml
@@ -0,0 +1,25 @@
+---
+features:
+ - |
+ Add ``qos-policies`` and ``qos-minimum-bandwidth-rule`` clients
+    to Tempest to make testing of the placement based
+    bandwidth allocation feature possible.
+    The following API calls are available for tempest from now on:
+
+ ``QoS policies`` client:
+
+ * GET /qos/policies
+ * POST /qos/policies
+ * GET /qos/policies/{policy_id}
+ * PUT /qos/policies/{policy_id}
+ * DELETE /qos/policies/{policy_id}
+
+
+ ``QoS minimum bandwidth rules`` client:
+
+ * GET qos/policies/{policy_id}/minimum_bandwidth_rules
+ * POST /qos/policies/{policy_id}/minimum_bandwidth_rules
+ * GET qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}
+ * PUT qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}
+ * DELETE /qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}
+
diff --git a/releasenotes/notes/account_generator-6eb03f664a448c35.yaml b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
new file mode 100644
index 0000000..ade632f
--- /dev/null
+++ b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove the deprecated CLI ``tempest-account-generator`` in favor of
+ ``tempest account-generator`` command.
+ You can use ``tempest account-generator`` CLI to generate the accounts
+ yaml file.
diff --git a/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml b/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml
new file mode 100644
index 0000000..e879c2c
--- /dev/null
+++ b/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed bug #1858417. Adding consistency group exceptions
+ ``ConsistencyGroupException`` and ``ConsistencyGroupSnapshotException``
+ that didn't exist before and caused failure in cinder-tempest-plugin.
diff --git a/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
index 87a6af9..85e5f5f 100644
--- a/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
+++ b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
@@ -3,10 +3,10 @@
- |
Allow to specify new domain parameters:
- * `user_domain_name`
- * `user_domain_id`
- * `project_domain_name`
- * `project_domain_id`
+ * ``user_domain_name``
+ * ``user_domain_id``
+ * ``project_domain_name``
+ * ``project_domain_id``
for CLIClient class, whose values will be substituted to
``--os-user-domain-name``, ``--os-user-domain-id``,
diff --git a/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml b/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml
new file mode 100644
index 0000000..931d689
--- /dev/null
+++ b/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Add a new config setting ``immutable_user_source`` in the
+ ``[identity-feature-enabled]`` group that defaults to false.
+ This setting, combined with the usage of the ``@testtools.skipIf()``
+ decorator, will allow tests that require user creation, deletion,
+ or modification to skip instead of failing in environments that
+ are LDAP-backed. In such environments, the user source is read-only,
+ so this feature flag is needed to allow such tests to gracefully skip
+ without having to blacklist them.
diff --git a/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml b/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml
new file mode 100644
index 0000000..ca6a78d
--- /dev/null
+++ b/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Add list host API support to the volume v3 client library.
+ This feature enables callers to list all hosts for a given project.
+ - |
+ Add migrate volume API support to the volume v3 client library.
+    This feature allows callers to migrate volumes between backends.
diff --git a/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml b/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml
new file mode 100644
index 0000000..2245044
--- /dev/null
+++ b/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+    Add support for the `OSProfiler library`_ for profiling and distributed
+    tracing of OpenStack. A new config option ``key`` in section ``profiler``
+    is added; the option sets the secret key used to enable profiling in
+    OpenStack services. The value needs to correspond to the one specified
+    in the [profiler]/hmac_keys option of OpenStack services.
+
+ .. _OSProfiler library: https://docs.openstack.org/osprofiler/
diff --git a/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml b/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml
new file mode 100644
index 0000000..f245dcb
--- /dev/null
+++ b/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ A new parameter ``follow_redirects`` has been added to the class
+ ``RestClient``, which is passed through to ``ClosingHttp`` or
+ ``ClosingProxyHttp`` respectively. The default value is ``True``
+ which corresponds to the previous behaviour of following up to five
+ redirections before returning a response. Setting
+    ``follow_redirects = False`` allows disabling this behaviour, so
+ that any redirect that is received is directly returned to the caller.
+ This allows tests to verify that an API is responding with a redirect.
+fixes:
+ - |
+ [`bug 1616892 <https://bugs.launchpad.net/tempest/+bug/1616892>`_]
+ Tempest now allows tests to verify that an API responds with a
+ redirect.
diff --git a/releasenotes/notes/add-show-api-v3-details-api-to-v3-versions-client-4b408427379cabfe.yaml b/releasenotes/notes/add-show-api-v3-details-api-to-v3-versions-client-4b408427379cabfe.yaml
new file mode 100644
index 0000000..50f10fa
--- /dev/null
+++ b/releasenotes/notes/add-show-api-v3-details-api-to-v3-versions-client-4b408427379cabfe.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add show api version details function to v3
+ versions_client library for cinder.
+
+ * show_version
\ No newline at end of file
diff --git a/releasenotes/notes/add-show-default-group-type-detail-api-to-v3-group-types-client-65f717878cf52da0.yaml b/releasenotes/notes/add-show-default-group-type-detail-api-to-v3-group-types-client-65f717878cf52da0.yaml
new file mode 100644
index 0000000..1419e66
--- /dev/null
+++ b/releasenotes/notes/add-show-default-group-type-detail-api-to-v3-group-types-client-65f717878cf52da0.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show default group type details API to v3 group_types_client library.
+
+ * show_default_group_type
diff --git a/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
new file mode 100644
index 0000000..a1bd4c5
--- /dev/null
+++ b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - A new config option 'subnet_id' is added to the section
+    'network' to specify the subnet which should be used for
+    allocation of IPs for VMs created during testing.
+    It should be used when the tested network contains more
+    than one subnet, otherwise the test of external connectivity
+    will fail. (Fixes bug #1856671)
diff --git a/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml b/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml
new file mode 100644
index 0000000..2203fd1
--- /dev/null
+++ b/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ New decorator ``unstable_test`` is added to ``tempest.lib.decorators``.
+    It can be used to mark a test as unstable; such a test will still be run
+    by tempest but the job will not fail if the test fails. Instead the
+    test will be skipped in case of failure.
+    It can be used, for example, when there is a known bug which causes
+    irregular test failures. Marking such a test as unstable will help other
+    developers get their job done while still running the test to get
+    additional debug data or to confirm whether a potential fix really solved the issue.
diff --git a/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
new file mode 100644
index 0000000..6b10937
--- /dev/null
+++ b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+    Add the ``--worker-file`` option to the ``tempest run`` command. This gives
+    tempest more granularity to manually configure how the different sets of
+    tests are grouped to run with different workers. You can configure test
+    regexes to run under specific workers, and you can also mix manual
+    scheduling with the standard one by specifying a concurrency.
+    For example, the user can set up tempest to run with different
+    concurrencies, to be used with different regexes.
diff --git a/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml b/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml
new file mode 100644
index 0000000..21068ec
--- /dev/null
+++ b/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+    Adds a new method to AgentsClient that implements agent deletion
+ according to the API [0].
+ [0] https://developer.openstack.org/api-ref/network/v2/index.html#delete-agent
+
diff --git a/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
new file mode 100644
index 0000000..384f916
--- /dev/null
+++ b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+    A new parameter, compute/compute_volume_common_az, is introduced to
+    specify the availability zone where tempest creates instances and volumes
+    for scenario tests. This allows us to run scenario tests in a deployment
+    which has multiple availability zones and where cinder/cross_az_attach in
+    nova.conf is set to False.
diff --git a/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml b/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml
new file mode 100644
index 0000000..a2e23fd
--- /dev/null
+++ b/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+    Fixed bug #1791007. The ``tempest workspace register`` and ``tempest workspace rename`` CLI
+    will now error if None or an empty string is passed in the --name argument. Earlier both
+    CLIs used to accept None or an empty string as the name, which was confusing.
+
+
diff --git a/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml b/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml
new file mode 100644
index 0000000..630908f
--- /dev/null
+++ b/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixed bug #1799883. ``tempest workspace register`` and ``tempest workspace move`` CLI
+ will now validate the value of the ``--path`` CLI argument and raise an exception if
+    it is None or an empty string. Earlier both CLI actions were accepting None or an
+    empty path, which was confusing.
diff --git a/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml b/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml
new file mode 100644
index 0000000..c280198
--- /dev/null
+++ b/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+    Fixed bug #1808473. The ``tempest run`` CLI will now error if a non-existent config file
+    is passed to the --config-file parameter. Earlier a non-existent config file was silently
+    ignored and the default config file was used instead, which gave the user the false
+    impression that the passed config file was being used.
diff --git a/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml b/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml
new file mode 100644
index 0000000..a28e4e2
--- /dev/null
+++ b/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml
@@ -0,0 +1,13 @@
+---
+upgrade:
+ - |
+ Default value of config option ``CONF.placement.region`` is updated
+ from ``RegionOne`` to empty string.
+
+    As per tempest design, if the tempest conf does not have a region for
+    a service then the identity region should be used. In the case of placement
+    the default value was "RegionOne", which was considered the placement
+    region if the region was missing in the tempest conf. In order to have the
+    identity region used as the default, we need to change the default to an
+    empty string for the placement service. An empty string is already used in
+    other services like volume, image etc.
diff --git a/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
new file mode 100644
index 0000000..c707f14
--- /dev/null
+++ b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The ``tempest.lib.decorators.attr`` decorator now supports a ``condition``
+ kwarg which can be used to conditionally apply the attr to the test
+ function if the condition evaluates to True.
diff --git a/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
new file mode 100644
index 0000000..8475f50
--- /dev/null
+++ b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
@@ -0,0 +1,8 @@
+---
+other:
+ - |
+ New configuration options ``[compute]/certified_image_ref`` and
+ ``[compute]/certified_image_trusted_certs`` have been introduced. These
+ are required in order to run the ``ServerShowV263Test`` test and allow a
+ signed image with the required img_signature_* properties set along
+ with a list of trusted certificates to be used during the test.
diff --git a/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml b/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml
new file mode 100644
index 0000000..2830aa2
--- /dev/null
+++ b/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml
@@ -0,0 +1,17 @@
+---
+fixes:
+ - |
+ Patch https://review.opendev.org/#/c/499575/ introduced
+    support for creating a Neutron port with certain capabilities.
+    Previously the capabilities list was interpreted as a string;
+    this change fixes it.
+
+ tempest.conf
+ [network]
+ port_profile = capabilities:[switchdev]
+
+ result:
+ {'capabilities':'[switchdev]'}
+
+ expected:
+ {'capabilities': ['switchdev']}
diff --git a/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
new file mode 100644
index 0000000..6bbb381
--- /dev/null
+++ b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - |
+    The ``[compute-feature-enabled]/block_migrate_cinder_iscsi`` option is
+    deprecated ahead of removal in a future release. Once removed, the
+    ``[compute-feature-enabled]/block_migration_for_live_migration``
+    option will then be used to determine when to run block migration
+    based tests during live migration.
diff --git a/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml b/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml
new file mode 100644
index 0000000..30551cb
--- /dev/null
+++ b/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The config option ``CONF.network.dns_servers`` is no longer used
+ anywhere, so it is deprecated and will be removed in the future.
+
diff --git a/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
new file mode 100644
index 0000000..d0c3a7d
--- /dev/null
+++ b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+ - |
+ The ``scheduler_available_filters`` option is being deprecated in favor of
+ ``scheduler_enabled_filters``. The new name is more indicative of what the
+ option means. ``scheduler_enabled_filters``'s default value is set to the
+ default value of Nova's ``enabled_filters``.
+    ``scheduler_available_filters``'s default was ``all``. There was confusion
+ around this value. Sometimes it was understood to mean the default Nova
+ filters are enabled, other times it was understood to mean all filters are
+    enabled. While ``all`` is still allowed for ``scheduler_enabled_filters`` for
+ backwards compatibility, it is strongly recommended to provide an explicit
+ list of filters that matches what's configured in nova.conf.
diff --git a/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
new file mode 100644
index 0000000..58b161f
--- /dev/null
+++ b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The config options ``CONF.compute.spice_console`` and ``CONF.compute.rdp_console``
+    are deprecated because the test cases using them were removed.
+    We can add them back if the test cases are added again.
diff --git a/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml b/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml
new file mode 100644
index 0000000..d7e144d
--- /dev/null
+++ b/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml
@@ -0,0 +1,12 @@
+---
+deprecations:
+ - |
+ The config option ``CONF.compute.vnc_server_header`` is deprecated because
+ it has become obsolete with the usage of different response header fields
+ to accomplish the same goal in accordance with RFC7231 Section 6.2.2.
+
+fixes:
+ - |
+ Adequately validates WebSocket upgrade in test_novnc and removes unneeded
+    configuration complexity. Closes bugs #1838777 and #1840788.
+
diff --git a/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
new file mode 100644
index 0000000..a0ac244
--- /dev/null
+++ b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+    Python 2.7 support has been dropped. The last release of Tempest
+    to support Python 2.7 is Tempest 23.0.0. The minimum version of Python now
+ supported by Tempest is Python 3.6.
diff --git a/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
new file mode 100644
index 0000000..99ef31e
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+  Remove support for Python 3.5.
+  Tempest and its plugins' dependencies in the Ussuri cycle
+  are python-requires>=py3.6, which forces distros that do not
+  ship Python 3.6 to use hacks to install it.
+  It is time to drop py3.5 from Tempest. The last supported
+  version of Tempest for py3.5 is 23.0.0.
diff --git a/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml
new file mode 100644
index 0000000..0959b22
--- /dev/null
+++ b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+ The ``tempest-full``, ``tempest-full-py3`` and ``tempest-slow`` zuul v3
+ job configurations now set ``ENABLE_VOLUME_MULTIATTACH: true`` in the
+ ``devstack_localrc`` variables section. If you have a plugin job
+ configuration that inherits from one of these jobs and the backend cinder
+    volume driver or nova compute driver does not support volume multiattach, then
+ you should override and set this variable to
+ ``ENABLE_VOLUME_MULTIATTACH: false`` in your job configuration.
diff --git a/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
new file mode 100644
index 0000000..4842f63
--- /dev/null
+++ b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    Fixed bug #1847749. This provides a workaround for the SkipException
+    being raised instead of the CLI tests being skipped. If you are running
+    Tempest with stestr > 2.5.0, use this fix.
+    Ref: https://github.com/testing-cabal/testtools/issues/272
diff --git a/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml b/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml
new file mode 100644
index 0000000..9534a72
--- /dev/null
+++ b/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ A new kwarg, ``log_req_body``, was added to the
+ ``tempest.lib.common.rest_client.RestClient`` method ``raw_request()``.
+ This kwarg takes in a string which will be used in place of the request
+ body, which is logged by default. The intent of this option is to be used
+    for security reasons to avoid logging sensitive information that is part
+ of request bodies.
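A minimal usage sketch (not part of the change itself), assuming the new
``log_req_body`` kwarg defaults to ``None`` and that ``raw_request()`` keeps
its existing ``url``/``method``/``headers``/``body`` parameters; the client
class and endpoint below are purely illustrative::

    from tempest.lib.common import rest_client

    class SecretsClient(rest_client.RestClient):
        """Hypothetical client used only to illustrate log_req_body."""

        def create_secret(self, payload):
            # Log a placeholder instead of the real request body so the
            # sensitive payload never reaches the tempest log file.
            resp, body = self.raw_request(
                self.base_url + '/secrets', 'POST',
                headers={'Content-Type': 'application/json'},
                body=payload,
                log_req_body='<secret payload omitted>')
            return resp, body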
diff --git a/releasenotes/notes/intermediate-train-release-0146c624cff9a57f.yaml b/releasenotes/notes/intermediate-train-release-0146c624cff9a57f.yaml
new file mode 100644
index 0000000..7940846
--- /dev/null
+++ b/releasenotes/notes/intermediate-train-release-0146c624cff9a57f.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+ This is an intermediate release during the Train development cycle to
+ mark the end of support for Pike in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Stein
+ * Rocky
+ * Queens
+
+ Current development of Tempest is for OpenStack Train development
+ cycle.
diff --git a/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
new file mode 100644
index 0000000..56f160a
--- /dev/null
+++ b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
@@ -0,0 +1,14 @@
+---
+prelude: |
+ This is an intermediate release during the Ussuri development cycle to
+ mark the end of support for EM Queens in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Train
+ * Stein
+ * Rocky
+
+ Current development of Tempest is for OpenStack Ussuri development
+ cycle.
+
+ This is the last release of Tempest to officially support python2.7.
diff --git a/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
new file mode 100644
index 0000000..a058137
--- /dev/null
+++ b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new attachments client library has been introduced for the volume
+ service.
+
+ Initially only the show_attachment API is provided. This API requires a
+ minimum volume API microversion of ``3.27``.
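A hedged sketch of how the new client might be used; the import path, class
name and response layout below follow the conventions of the other volume v3
clients and are assumptions, not part of the release note::

    from tempest.lib.services.volume.v3 import attachments_client

    def show_attachment(auth_provider, attachment_id):
        # The constructor arguments follow the usual service client
        # pattern; the service type and region here are placeholders.
        client = attachments_client.AttachmentsClient(
            auth_provider, 'volumev3', 'RegionOne')
        # show_attachment() requires volume API microversion >= 3.27.
        return client.show_attachment(attachment_id)['attachment']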
diff --git a/releasenotes/notes/lib_api_microversion_fixture-f52308fc6b6b89f2.yaml b/releasenotes/notes/lib_api_microversion_fixture-f52308fc6b6b89f2.yaml
new file mode 100644
index 0000000..d707fc7
--- /dev/null
+++ b/releasenotes/notes/lib_api_microversion_fixture-f52308fc6b6b89f2.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+    New library interface to set the API microversion on service clients.
+    ``APIMicroversionFixture`` can be used to set the API microversion
+    on multiple services. This fixture takes care of resetting the service
+    microversion to None once the test is finished.
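A minimal usage sketch, assuming the fixture is importable from
``tempest.lib.common.api_microversion_fixture`` and accepts per-service
keyword arguments such as ``compute_microversion``; the test class is
illustrative only::

    from tempest.api.compute import base
    from tempest.lib.common import api_microversion_fixture

    class HypotheticalMicroversionTest(base.BaseV2ComputeTest):

        def test_list_servers_with_microversion(self):
            # The fixture sets the requested microversion on the service
            # clients and resets it to None when the test finishes.
            self.useFixture(api_microversion_fixture.APIMicroversionFixture(
                compute_microversion='2.37'))
            self.servers_client.list_servers()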
diff --git a/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
index 2bba952..37eeec5 100644
--- a/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
+++ b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
@@ -6,6 +6,6 @@
other projects can use these modules as stable libraries
without any maintenance changes.
- * account_client
- * container_client
- * object_client
+ * account_client
+ * container_client
+ * object_client
diff --git a/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml b/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml
index 72b8e26..00829b9 100644
--- a/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml
+++ b/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml
@@ -6,5 +6,5 @@
other projects can use these modules as stable libraries
without any maintenance changes.
- * bulk_middleware_client
- * capabilities_client
+ * bulk_middleware_client
+ * capabilities_client
diff --git a/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml b/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml
new file mode 100644
index 0000000..36a9710
--- /dev/null
+++ b/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml
@@ -0,0 +1,7 @@
+---
+features:
+- |
+  Add a ``show_version`` function to the ``NetworkVersionsClient`` client.
+  This makes it possible to get the details of a Networking API version.
+
+ .. API reference: https://developer.openstack.org/api-ref/network/v2/index.html#show-api-v2-details
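A hedged example of calling the new function, assuming the client is exposed
as ``network_versions_client`` on the network test base class and that the
response nests the details under a ``version`` key::

    from tempest.api.network import base

    class HypotheticalVersionTest(base.BaseNetworkTest):

        def test_show_network_v2_details(self):
            # show_version() returns the detail document of a single
            # Networking API version.
            body = self.network_versions_client.show_version('v2.0')
            self.assertEqual('v2.0', body['version']['id'])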
diff --git a/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
new file mode 100644
index 0000000..936bf1f
--- /dev/null
+++ b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+    Remove the deprecated ``os-tenant-name`` argument and ``OS_TENANT_NAME``
+    environment variable in favour of the ``os-project-name`` argument.
diff --git a/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml b/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml
new file mode 100644
index 0000000..f22736f
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ ``tempest.lib.common.utils.misc.find_test_caller`` was deprecated during
+ Kilo release cycle in favor of
+ ``tempest.lib.common.utils.test_utils.find_test_caller``. The deprecated
+ version of ``find_test_caller`` is removed.
diff --git a/releasenotes/notes/remove-some-deprecated-auth-and-identity-options-xa1xd9b8fb948g4f.yaml b/releasenotes/notes/remove-some-deprecated-auth-and-identity-options-xa1xd9b8fb948g4f.yaml
new file mode 100644
index 0000000..fa21afd
--- /dev/null
+++ b/releasenotes/notes/remove-some-deprecated-auth-and-identity-options-xa1xd9b8fb948g4f.yaml
@@ -0,0 +1,8 @@
+upgrade:
+ - |
+ Remove deprecated config option ``endpoint_type`` from
+ ``identity`` group. Use ``v2_public_endpoint_type`` from
+ ``identity`` group instead.
+ Remove deprecated config option ``tenant_isolation_domain_name``
+ from ``auth`` group. Use ``default_credentials_domain_name`` from
+ ``auth`` group instead.
diff --git a/releasenotes/notes/remove-some-deprecated-identity-options-0ffxd1b8db928e43.yaml b/releasenotes/notes/remove-some-deprecated-identity-options-0ffxd1b8db928e43.yaml
new file mode 100644
index 0000000..e9e9444
--- /dev/null
+++ b/releasenotes/notes/remove-some-deprecated-identity-options-0ffxd1b8db928e43.yaml
@@ -0,0 +1,11 @@
+upgrade:
+ - |
+ Remove deprecated config option ``admin_username`` from
+ ``identity`` groups. Use ``admin_username`` from ``auth`` instead.
+ Remove deprecated config option ``admin_tenant_name`` from
+ ``auth`` and ``identity`` groups. Use ``admin_project_name`` from
+ ``auth`` instead.
+ Remove deprecated config option ``admin_password`` from
+ ``identity`` groups. Use ``admin_password`` from ``auth`` instead.
+ Remove deprecated config option ``admin_domain_name`` from
+ ``identity`` groups. Use ``admin_domain_name`` from ``auth`` instead.
\ No newline at end of file
diff --git a/releasenotes/notes/segments-client-866f02948f40d4ff.yaml b/releasenotes/notes/segments-client-866f02948f40d4ff.yaml
new file mode 100644
index 0000000..90ac3e8
--- /dev/null
+++ b/releasenotes/notes/segments-client-866f02948f40d4ff.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+    Add a ``segments`` client to Tempest to make testing of the
+    Routed Provider Networks feature possible.
+    The following API calls are now available in Tempest:
+
+ * POST /segments
+ * PUT /segments/{segment_id}
+    * GET /segments/{segment_id}
+ * DELETE /segments/{segment_id}
+ * GET /segments
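A hedged sketch of driving these calls through the new client; the method
names mirror the usual Tempest client naming and, like the segment attributes
shown, are assumptions rather than part of the release note::

    def exercise_segments(segments_client, network_id):
        # Create, show and delete a routed provider network segment.
        segment = segments_client.create_segment(
            network_id=network_id, network_type='vlan',
            physical_network='physnet1', segmentation_id=2016)['segment']
        segments_client.show_segment(segment['id'])
        segments_client.delete_segment(segment['id'])
        return segments_client.list_segments()['segments']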
diff --git a/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml b/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml
new file mode 100644
index 0000000..e7fc3a0
--- /dev/null
+++ b/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - |
+    The standalone ``subunit-describe-calls`` command is deprecated.
+
+features:
+ - |
+    Added a new ``tempest subunit-describe-calls`` subcommand.
diff --git a/releasenotes/notes/support-microversion-in-scenario-test-b4fbfdd3a977fc58.yaml b/releasenotes/notes/support-microversion-in-scenario-test-b4fbfdd3a977fc58.yaml
new file mode 100644
index 0000000..4d0a3dd
--- /dev/null
+++ b/releasenotes/notes/support-microversion-in-scenario-test-b4fbfdd3a977fc58.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+    Add microversion support for scenario tests. A scenario test calls
+    multiple service APIs within the same test, and many services such as
+    compute, volume and placement support API microversions. With
+    microversion support in scenario tests, we can call different service
+    APIs with different microversions, which means we can also implement
+    scenario tests for specific microversions.
+    Currently the scenario manager supports microversions for the
+    following services:
+
+ * Compute
+ * Volume
+ * Placement
diff --git a/releasenotes/notes/tempest-rocky-release-0fc3312053923380.yaml b/releasenotes/notes/tempest-rocky-release-0fc3312053923380.yaml
index e9c77a6..8406be6 100644
--- a/releasenotes/notes/tempest-rocky-release-0fc3312053923380.yaml
+++ b/releasenotes/notes/tempest-rocky-release-0fc3312053923380.yaml
@@ -1,12 +1,12 @@
---
-prelude: >
+prelude: |
This release is to tag the Tempest for OpenStack Rocky release.
After this release, Tempest will support below OpenStack Releases:
- * Rocky
- * Queens
- * Pike
- * Ocata
+ * Rocky
+ * Queens
+ * Pike
+ * Ocata
Current development of Tempest is for OpenStack Stein development
cycle. Every Tempest commit is also tested against master during
diff --git a/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml b/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml
new file mode 100644
index 0000000..c3537fc
--- /dev/null
+++ b/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml
@@ -0,0 +1,18 @@
+---
+prelude: |
+ This release is to tag the Tempest for OpenStack Stein release.
+ This release marks the start of Stein release support in Tempest and
+ the end of support for Ocata in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Stein
+ * Rocky
+ * Queens
+ * Pike
+
+ Current development of Tempest is for OpenStack Train development
+ cycle. Every Tempest commit is also tested against master during
+ the Train cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Train (or future release)
+ cloud.
+  To be on the safe side, use this tag to test the OpenStack Stein release.
diff --git a/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml b/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml
new file mode 100644
index 0000000..960b0b2
--- /dev/null
+++ b/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+ This release is to tag the Tempest for OpenStack Train release.
+ This release marks the start of Train release support in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Train
+ * Stein
+ * Rocky
+ * Queens
+
+ Current development of Tempest is for OpenStack Ussuri development
+ cycle. Every Tempest commit is also tested against master during
+ the Ussuri cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Ussuri (or future release)
+ cloud.
+  To be on the safe side, use this tag to test the OpenStack Train release.
diff --git a/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
new file mode 100644
index 0000000..37e56bb
--- /dev/null
+++ b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
@@ -0,0 +1,16 @@
+---
+prelude: >
+ This release is to tag the Tempest for OpenStack Ussuri release.
+ This release marks the start of Ussuri release support in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Ussuri
+ * Train
+ * Stein
+
+ Current development of Tempest is for OpenStack Victoria development
+ cycle. Every Tempest commit is also tested against master during
+ the Victoria cycle. However, this does not necessarily mean that using
+  Tempest as of this tag will work against a Victoria (or future release)
+ cloud.
+  To be on the safe side, use this tag to test the OpenStack Ussuri release.
diff --git a/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
new file mode 100644
index 0000000..ce401ff
--- /dev/null
+++ b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+    Remove the deprecated ``verify-tempest-config`` CLI in favour of the
+    ``tempest verify-config`` command.
+    You can use the ``tempest verify-config`` CLI to verify the Tempest
+    configuration file.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 57ec7e1..71df749 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -42,12 +42,9 @@
]
# openstackdocstheme options
-repository_name = 'openstack/tempest'
-bug_project = 'tempest'
-bug_tag = ''
-
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/tempest'
+openstackdocs_bug_project = 'tempest'
+openstackdocs_bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -62,7 +59,6 @@
master_doc = 'index'
# General information about the project.
-project = u'tempest Release Notes'
copyright = u'2016, tempest Developers'
# Release do not need a version number in the title, they
@@ -193,17 +189,6 @@
# -- Options for LaTeX output ---------------------------------------------
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 3be014f..d8702f9 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,12 @@
:maxdepth: 1
unreleased
+ v24.0.0
+ v23.0.0
+ v22.1.0
+ v22.0.0
+ v21.0.0
+ v20.0.0
v19.0.0
v18.0.0
v17.2.0
diff --git a/releasenotes/source/v20.0.0.rst b/releasenotes/source/v20.0.0.rst
new file mode 100644
index 0000000..28c5431
--- /dev/null
+++ b/releasenotes/source/v20.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v20.0.0 Release Notes
+=====================
+
+.. release-notes:: 20.0.0 Release Notes
+ :version: 20.0.0
diff --git a/releasenotes/source/v21.0.0.rst b/releasenotes/source/v21.0.0.rst
new file mode 100644
index 0000000..9ea8120
--- /dev/null
+++ b/releasenotes/source/v21.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v21.0.0 Release Notes
+=====================
+
+.. release-notes:: 21.0.0 Release Notes
+ :version: 21.0.0
diff --git a/releasenotes/source/v22.0.0.rst b/releasenotes/source/v22.0.0.rst
new file mode 100644
index 0000000..519b081
--- /dev/null
+++ b/releasenotes/source/v22.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v22.0.0 Release Notes
+=====================
+
+.. release-notes:: 22.0.0 Release Notes
+ :version: 22.0.0
diff --git a/releasenotes/source/v22.1.0.rst b/releasenotes/source/v22.1.0.rst
new file mode 100644
index 0000000..6a4fd1f
--- /dev/null
+++ b/releasenotes/source/v22.1.0.rst
@@ -0,0 +1,6 @@
+=====================
+v22.1.0 Release Notes
+=====================
+
+.. release-notes:: 22.1.0 Release Notes
+ :version: 22.1.0
diff --git a/releasenotes/source/v23.0.0.rst b/releasenotes/source/v23.0.0.rst
new file mode 100644
index 0000000..7c5edf8
--- /dev/null
+++ b/releasenotes/source/v23.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v23.0.0 Release Notes
+=====================
+
+.. release-notes:: 23.0.0 Release Notes
+ :version: 23.0.0
diff --git a/releasenotes/source/v24.0.0.rst b/releasenotes/source/v24.0.0.rst
new file mode 100644
index 0000000..8131975
--- /dev/null
+++ b/releasenotes/source/v24.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v24.0.0 Release Notes
+=====================
+
+.. release-notes:: 24.0.0 Release Notes
+ :version: 24.0.0
diff --git a/requirements.txt b/requirements.txt
index 7520d42..bf38fae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
cliff!=2.9.0,>=2.8.0 # Apache-2.0
-jsonschema<3.0.0,>=2.6.0 # MIT
+jsonschema>=2.6.0 # MIT
testtools>=2.2.0 # MIT
paramiko>=2.0.0 # LGPLv2.1+
netaddr>=0.7.18 # BSD
diff --git a/roles/ipv6-only-deployments-verification/README.rst b/roles/ipv6-only-deployments-verification/README.rst
new file mode 100644
index 0000000..400a8da
--- /dev/null
+++ b/roles/ipv6-only-deployments-verification/README.rst
@@ -0,0 +1,16 @@
+Verify the IPv6-only deployments
+
+This role needs to be invoked from a playbook that
+runs tests. It verifies the IPv6 settings on the
+devstack side and that devstack deployed the services
+on IPv6. The role is invoked before tests are run so
+that any missing IPv6 settings or deployments fail
+the job early.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/ipv6-only-deployments-verification/defaults/main.yaml b/roles/ipv6-only-deployments-verification/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/ipv6-only-deployments-verification/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/ipv6-only-deployments-verification/tasks/main.yaml b/roles/ipv6-only-deployments-verification/tasks/main.yaml
new file mode 100644
index 0000000..d73c79c
--- /dev/null
+++ b/roles/ipv6-only-deployments-verification/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Verify the ipv6-only deployments
+ become: true
+ become_user: stack
+ shell: "{{ devstack_base_dir }}/tempest/tools/verify-ipv6-only-deployments.sh"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
index 3724e0e..e3a0a0e 100644
--- a/roles/process-stackviz/tasks/main.yaml
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -17,13 +17,18 @@
when: not subunit_input.stat.exists
- name: Install stackviz
- pip:
- name: "file://{{ stackviz_archive.stat.path }}"
- virtualenv: /tmp/stackviz
- extra_args: -U
when:
- stackviz_archive.stat.exists
- subunit_input.stat.exists
+ block:
+ - include_role:
+ name: ensure-pip
+
+ - pip:
+ name: "file://{{ stackviz_archive.stat.path }}"
+ virtualenv: /tmp/stackviz
+ virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
+ extra_args: -U
- name: Deploy stackviz static html+js
command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 71b8e4f..3643edb 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -1,5 +1,8 @@
Run Tempest
+The result of the tempest run is stored in the ``tempest_run_result``
+variable (through the ``register`` statement).
+
**Role Variables**
.. zuul:rolevar:: devstack_base_dir
@@ -20,14 +23,12 @@
It works only when used with some specific tox environments
('all', 'all-plugin'.)
- Multi-line and commented regexs can be achieved by doing this:
+   In the following example only the api, scenario and third party tests
+   will be executed.
::
vars:
- tempest_test_regex: |
- (?x) # Ignore comments and whitespaces
- # Line with only a comment.
- (tempest\.(api|scenario|thirdparty)).*$ # Run only api scenario and third party
+ tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
.. zuul:rolevar:: tempest_test_blacklist
@@ -48,11 +49,35 @@
It works only when used with some specific tox environments
('all', 'all-plugin'.)
- Multi-line and commented regexs can be achieved by doing this:
+ ::
+ vars:
+ tempest_black_regex: (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+ :default: ''
+
+ String of extra command line options to pass to tox.
+
+ Here is an example of running tox with --sitepackages option:
::
vars:
- tempest_black_regex: |
- (?x) # Ignore comments and whitespaces
- # Line with only a comment.
- (tempest.api.identity).*$
+ tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+ :default: ''
+
+ The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+ :default: ''
+
+   Upper constraints file to be used for stable branches up to stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+   :default: {}
+
+   Environment variables to set for the run-tempest task.
+
+   Environment variables set in this variable will be combined with some
+   more default environment variables set at runtime.
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index c89eb93..5867b6c 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -2,3 +2,8 @@
tempest_test_regex: ''
tox_envlist: smoke
tempest_black_regex: ''
+tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 54ddc71..1de3725 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -20,6 +20,22 @@
default_concurrency: "{{ num_cores|int // 2 }}"
when: num_cores|int > 3
+- name: Override target branch
+ set_fact:
+ target_branch: "{{ zuul.override_checkout }}"
+ when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/rocky
+ set_fact:
+ # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky"]
+
+- name: Set OS_TEST_TIMEOUT if requested
+ set_fact:
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+ when: tempest_test_timeout != ''
+
- when:
- tempest_test_blacklist is defined
block:
@@ -35,10 +51,12 @@
when: blacklist_stat.stat.exists
- name: Run Tempest
- command: tox -e {{tox_envlist}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+ command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
--concurrency={{tempest_concurrency|default(default_concurrency)}} \
--black-regex={{tempest_black_regex|quote}}
args:
chdir: "{{devstack_base_dir}}/tempest"
+ register: tempest_run_result
become: true
become_user: tempest
+ environment: "{{ tempest_tox_environment }}"
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
new file mode 100644
index 0000000..70719ca
--- /dev/null
+++ b/roles/tempest-cleanup/README.rst
@@ -0,0 +1,33 @@
+Tempest cleanup
+===============
+
+Documentation regarding tempest cleanup can be found at the following
+link:
+https://docs.openstack.org/tempest/latest/cleanup.html
+
+When init_saved_state and dry_run variables are set to false, the role
+execution will run tempest cleanup which deletes resources not present in the
+saved_state.json file.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: init_saved_state
+ :default: false
+
+ When true, tempest cleanup --init-saved-state will be executed which
+ initializes the saved state of the OpenStack deployment and will output
+ a saved_state.json file containing resources from the deployment that will
+ be preserved from the cleanup command. This should be done prior to running
+ Tempest tests.
+
+.. zuul:rolevar:: dry_run
+ :default: false
+
+ When true, tempest cleanup creates a report (./dry_run.json) of the
+   resources that would be cleaned up if the role were run with the
+   dry_run option set to false.
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
new file mode 100644
index 0000000..fc1948a
--- /dev/null
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+init_saved_state: false
+dry_run: false
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
new file mode 100644
index 0000000..5444afc
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -0,0 +1,31 @@
+- when: init_saved_state
+ block:
+ - name: Run tempest cleanup init-saved-state
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --init-saved-state --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+
+ - name: Cat saved_state.json
+ command: cat "{{ devstack_base_dir }}/tempest/saved_state.json"
+
+- when: dry_run
+ block:
+ - name: Run tempest cleanup dry-run
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+
+ - name: Cat dry_run.json
+ command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
+
+- name: Run tempest cleanup
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+ when: not dry_run and not init_saved_state
diff --git a/setup.cfg b/setup.cfg
index 96ee7ea..18427a2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,8 +4,9 @@
description-file =
README.rst
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
+python-requires = >=3.6
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
@@ -13,10 +14,12 @@
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.5
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: Implementation :: CPython
[files]
packages =
@@ -26,14 +29,13 @@
[entry_points]
console_scripts =
- verify-tempest-config = tempest.cmd.verify_tempest_config:main
- tempest-account-generator = tempest.cmd.account_generator:main
tempest = tempest.cmd.main:main
skip-tracker = tempest.lib.cmd.skip_tracker:main
check-uuid = tempest.lib.cmd.check_uuid:run
subunit-describe-calls = tempest.cmd.subunit_describe_calls:entry_point
tempest.cm =
account-generator = tempest.cmd.account_generator:TempestAccountGenerator
+ subunit-describe-calls = tempest.cmd.subunit_describe_calls:TempestSubunitDescribeCalls
init = tempest.cmd.init:TempestInit
cleanup = tempest.cmd.cleanup:TempestCleanup
list-plugins = tempest.cmd.list_plugins:TempestListPlugins
@@ -47,5 +49,3 @@
oslo.config.opts =
tempest.config = tempest.config:list_opts
-[wheel]
-universal = 1
diff --git a/setup.py b/setup.py
index 566d844..f63cc23 100644
--- a/setup.py
+++ b/setup.py
@@ -16,14 +16,6 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
diff --git a/tempest/README.rst b/tempest/README.rst
index a5f4a92..b345032 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -9,7 +9,7 @@
OpenStack clouds.
As such Tempest tests come in many flavors, each with their own rules
-and guidelines. Below is the overview of the Tempest respository structure
+and guidelines. Below is the overview of the Tempest repository structure
to make this clear.
.. code-block:: console
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index a6e0efa..d5adfed 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -144,6 +144,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
def test_aggregate_add_existent_host(self):
+        # Adding an already existing host to the aggregate should fail.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate = self._create_test_aggregate()
@@ -172,6 +173,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
def test_aggregate_remove_nonexistent_host(self):
+        # Removing a nonexistent host from the aggregate should fail.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.NotFound, self.client.remove_host,
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 1483c2e..f42f53a 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -46,6 +46,7 @@
@decorators.idempotent_id('8b4330e1-12c4-4554-9390-e6639971f086')
def test_create_flavor_with_int_id(self):
+ """Test creating flavor with id of type integer"""
flavor_id = data_utils.rand_int_id(start=1000)
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -55,6 +56,7 @@
@decorators.idempotent_id('94c9bb4e-2c2a-4f3c-bb1f-5f0daf918e6d')
def test_create_flavor_with_uuid_id(self):
+ """Test creating flavor with id of type uuid"""
flavor_id = data_utils.rand_uuid()
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -64,8 +66,11 @@
@decorators.idempotent_id('f83fe669-6758-448a-a85e-32d351f36fe0')
def test_create_flavor_with_none_id(self):
- # If nova receives a request with None as flavor_id,
- # nova generates flavor_id of uuid.
+ """Test creating flavor without id specified
+
+ If nova receives a request with None as flavor_id,
+ nova generates flavor_id of uuid.
+ """
flavor_id = None
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -75,8 +80,10 @@
@decorators.idempotent_id('8261d7b0-be58-43ec-a2e5-300573c3f6c5')
def test_create_flavor_verify_entry_in_list_details(self):
- # Create a flavor and ensure it's details are listed
- # This operation requires the user to have 'admin' role
+ """Create a flavor and ensure its details are listed
+
+ This operation requires the user to have 'admin' role
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -94,9 +101,10 @@
@decorators.idempotent_id('63dc64e6-2e79-4fdf-868f-85500d308d66')
def test_create_list_flavor_without_extra_data(self):
- # Create a flavor and ensure it is listed
- # This operation requires the user to have 'admin' role
+ """Create a flavor and ensure it is listed
+ This operation requires the user to have 'admin' role
+ """
def verify_flavor_response_extension(flavor):
# check some extensions for the flavor create/show/detail response
self.assertEqual(flavor['swap'], '')
@@ -134,10 +142,12 @@
@decorators.idempotent_id('be6cc18c-7c5d-48c0-ac16-17eaf03c54eb')
def test_list_non_public_flavor(self):
- # Create a flavor with os-flavor-access:is_public false.
- # The flavor should not be present in list_details as the
- # tenant is not automatically added access list.
- # This operation requires the user to have 'admin' role
+ """Create a flavor with os-flavor-access:is_public false.
+
+ The flavor should not be present in list_details as the
+ tenant is not automatically added access list.
+ This operation requires the user to have 'admin' role
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -156,7 +166,7 @@
@decorators.idempotent_id('bcc418ef-799b-47cc-baa1-ce01368b8987')
def test_create_server_with_non_public_flavor(self):
- # Create a flavor with os-flavor-access:is_public false
+ """Create a flavor with os-flavor-access:is_public false"""
flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
disk=self.disk,
is_public="False")
@@ -169,8 +179,10 @@
@decorators.idempotent_id('b345b196-bfbd-4231-8ac1-6d7fe15ff3a3')
def test_list_public_flavor_with_other_user(self):
- # Create a Flavor with public access.
- # Try to List/Get flavor with another user
+ """Create a Flavor with public access.
+
+ Try to List/Get flavor with another user
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -184,6 +196,7 @@
@decorators.idempotent_id('fb9cbde6-3a0e-41f2-a983-bdb0a823c44e')
def test_is_public_string_variations(self):
+ """Test creating public and non public flavors"""
flavor_name_not_public = data_utils.rand_name(self.flavor_name_prefix)
flavor_name_public = data_utils.rand_name(self.flavor_name_prefix)
@@ -215,6 +228,7 @@
@decorators.idempotent_id('3b541a2e-2ac2-4b42-8b8d-ba6e22fcd4da')
def test_create_flavor_using_string_ram(self):
+ """Test creating flavor with ram of type string"""
new_flavor_id = data_utils.rand_int_id(start=1000)
ram = "1024"
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4d27a22..4c531b3 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -61,10 +61,13 @@
@decorators.idempotent_id('0b2f9d4b-1ca2-4b99-bb40-165d4bb94208')
def test_flavor_set_get_update_show_unset_keys(self):
- # Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
- # spec as a user with admin privileges.
+ """Test flavor extra spec operations by admin user
+
+ Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
+ spec as a user with admin privileges.
+ """
# Assigning extra specs values that are to be set
- specs = {"key1": "value1", "key2": "value2"}
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
# SET extra specs to the flavor created in setUp
set_body = self.admin_flavors_client.set_flavor_extra_spec(
self.flavor['id'], **specs)['extra_specs']
@@ -74,30 +77,34 @@
self.flavor['id'])['extra_specs'])
self.assertEqual(get_body, specs)
- # UPDATE the value of the extra specs key1
- update_body = \
- self.admin_flavors_client.update_flavor_extra_spec(
- self.flavor['id'], "key1", key1="value")
- self.assertEqual({"key1": "value"}, update_body)
+ # UPDATE the value of the extra specs 'hw:numa_nodes'
+ update_body = self.admin_flavors_client.update_flavor_extra_spec(
+ self.flavor['id'], "hw:numa_nodes", **{'hw:numa_nodes': '2'})
+ self.assertEqual({'hw:numa_nodes': '2'}, update_body)
- # GET extra specs and verify the value of the key2
+ # GET extra specs and verify the value of the 'hw:cpu_policy'
# is the same as before
get_body = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor['id'])['extra_specs']
- self.assertEqual(get_body, {"key1": "value", "key2": "value2"})
+ self.assertEqual(
+ get_body, {'hw:numa_nodes': '2', 'hw:cpu_policy': 'shared'}
+ )
# UNSET extra specs that were set in this test
- self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
- "key1")
- self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
- "key2")
+ self.admin_flavors_client.unset_flavor_extra_spec(
+ self.flavor['id'], 'hw:numa_nodes'
+ )
+ self.admin_flavors_client.unset_flavor_extra_spec(
+ self.flavor['id'], 'hw:cpu_policy'
+ )
get_body = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor['id'])['extra_specs']
self.assertEmpty(get_body)
@decorators.idempotent_id('a99dad88-ae1c-4fba-aeb4-32f898218bd0')
def test_flavor_non_admin_get_all_keys(self):
- specs = {"key1": "value1", "key2": "value2"}
+ """Test non admin user getting all flavor extra spec keys"""
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
self.admin_flavors_client.set_flavor_extra_spec(self.flavor['id'],
**specs)
body = (self.flavors_client.list_flavor_extra_specs(
@@ -108,11 +115,15 @@
@decorators.idempotent_id('12805a7f-39a3-4042-b989-701d5cad9c90')
def test_flavor_non_admin_get_specific_key(self):
+ """Test non admin user getting specific flavor extra spec key"""
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
body = self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")['extra_specs']
- self.assertEqual(body['key1'], 'value1')
- self.assertIn('key2', body)
+ self.flavor['id'], **specs
+ )['extra_specs']
+ self.assertEqual(body['hw:numa_nodes'], '1')
+ self.assertIn('hw:cpu_policy', body)
+
body = self.flavors_client.show_flavor_extra_spec(
- self.flavor['id'], 'key1')
- self.assertEqual(body['key1'], 'value1')
- self.assertNotIn('key2', body)
+ self.flavor['id'], 'hw:numa_nodes')
+ self.assertEqual(body['hw:numa_nodes'], '1')
+ self.assertNotIn('hw:cpu_policy', body)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 5cde39e..721acca 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -64,70 +64,82 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
def test_flavor_non_admin_set_keys(self):
- # Test to SET flavor extra spec as a user without admin privileges.
+ """Test to SET flavor extra spec as a user without admin privileges"""
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.set_flavor_extra_spec,
self.flavor['id'],
- key1="value1", key2="value2")
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1ebf4ef8-759e-48fe-a801-d451d80476fb')
def test_flavor_non_admin_update_specific_key(self):
- # non admin user is not allowed to update flavor extra spec
+        """Non admin user is not allowed to update flavor extra spec"""
body = self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")['extra_specs']
- self.assertEqual(body['key1'], 'value1')
+ self.flavor['id'],
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+ )['extra_specs']
+ self.assertEqual(body['hw:numa_nodes'], '1')
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.
update_flavor_extra_spec,
self.flavor['id'],
- 'key1',
- key1='value1_new')
+ 'hw:numa_nodes',
+ **{'hw:numa_nodes': '1'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('28f12249-27c7-44c1-8810-1f382f316b11')
def test_flavor_non_admin_unset_keys(self):
+        """Non admin user is not allowed to unset flavor extra spec"""
self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")
+ self.flavor['id'],
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+ )
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.unset_flavor_extra_spec,
self.flavor['id'],
- 'key1')
+ 'hw:numa_nodes')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('440b9f3f-3c7f-4293-a106-0ceda350f8de')
def test_flavor_unset_nonexistent_key(self):
+        """Unsetting a nonexistent flavor extra spec key should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_flavors_client.unset_flavor_extra_spec,
self.flavor['id'],
- 'nonexistent_key')
+ 'hw:cpu_thread_policy')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('329a7be3-54b2-48be-8052-bf2ce4afd898')
def test_flavor_get_nonexistent_key(self):
+        """Getting a nonexistent flavor extra spec key should fail"""
self.assertRaises(lib_exc.NotFound,
self.flavors_client.show_flavor_extra_spec,
self.flavor['id'],
- "nonexistent_key")
+ 'hw:cpu_thread_policy')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('25b822b8-9f49-44f6-80de-d99f0482e5cb')
def test_flavor_update_mismatch_key(self):
- # the key will be updated should be match the key in the body
+ """Updating unmatched flavor extra spec key should fail
+
+ The key to be updated should match the key in the body
+ """
self.assertRaises(lib_exc.BadRequest,
self.admin_flavors_client.update_flavor_extra_spec,
self.flavor['id'],
- "key2",
- key1="value")
+ 'hw:numa_nodes',
+ **{'hw:cpu_policy': 'shared'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f5889590-bf66-41cc-b4b1-6e6370cfd93f')
def test_flavor_update_more_key(self):
- # there should be just one item in the request body
+ """Updating multiple flavor spec keys should fail
+
+ There should be just one item in the request body
+ """
self.assertRaises(lib_exc.BadRequest,
self.admin_flavors_client.update_flavor_extra_spec,
self.flavor['id'],
- "key1",
- key1="value",
- key2="value")
+ 'hw:numa_nodes',
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index c246685..31fe2b5 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -29,11 +29,13 @@
@decorators.idempotent_id('9bfaf98d-e2cb-44b0-a07e-2558b2821e4f')
def test_list_hosts(self):
+ # Listing hosts.
hosts = self.client.list_hosts()['hosts']
self.assertGreaterEqual(len(hosts), 2, str(hosts))
@decorators.idempotent_id('5dc06f5b-d887-47a2-bb2a-67762ef3c6de')
def test_list_hosts_with_zone(self):
+        # Listing hosts with a specified availability zone.
self.useFixture(fixtures.LockFixture('availability_zone'))
hosts = self.client.list_hosts()['hosts']
host = hosts[0]
@@ -43,6 +45,7 @@
@decorators.idempotent_id('9af3c171-fbf4-4150-a624-22109733c2a6')
def test_list_hosts_with_a_blank_zone(self):
+        # Listing hosts with a blank availability zone.
# If send the request with a blank zone, the request will be successful
# and it will return all the hosts list
hosts = self.client.list_hosts(zone='')['hosts']
@@ -50,6 +53,7 @@
@decorators.idempotent_id('c6ddbadb-c94e-4500-b12f-8ffc43843ff8')
def test_list_hosts_with_nonexistent_zone(self):
+        # Listing hosts with a nonexistent availability zone.
# If send the request with a nonexistent zone, the request will be
# successful and no hosts will be returned
hosts = self.client.list_hosts(zone='xxx')['hosts']
@@ -57,6 +61,7 @@
@decorators.idempotent_id('38adbb12-aee2-4498-8aec-329c72423aa4')
def test_show_host_detail(self):
+ # Showing host details.
hosts = self.client.list_hosts()['hosts']
hosts = [host for host in hosts if host['service'] == 'compute']
diff --git a/tempest/api/compute/admin/test_hosts_negative.py b/tempest/api/compute/admin/test_hosts_negative.py
index 8a91ae2..e8733c8 100644
--- a/tempest/api/compute/admin/test_hosts_negative.py
+++ b/tempest/api/compute/admin/test_hosts_negative.py
@@ -39,18 +39,21 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
def test_list_hosts_with_non_admin_user(self):
+ # Non admin user is not allowed to list hosts.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_hosts)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
def test_show_host_detail_with_nonexistent_hostname(self):
+        # Showing host detail with a nonexistent hostname should fail.
self.assertRaises(lib_exc.NotFound,
self.client.show_host, 'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
def test_show_host_detail_with_non_admin_user(self):
+ # Non admin user is not allowed to show host details.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_host,
self.hostname)
@@ -58,6 +61,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
def test_update_host_with_non_admin_user(self):
+ # Non admin user is not allowed to update host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.update_host,
self.hostname,
@@ -67,7 +71,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
def test_update_host_with_invalid_status(self):
- # 'status' can only be 'enable' or 'disable'
+        # Updating a host to an invalid status should fail,
+ # 'status' can only be 'enable' or 'disable'.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
@@ -77,7 +82,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
def test_update_host_with_invalid_maintenance_mode(self):
- # 'maintenance_mode' can only be 'enable' or 'disable'
+        # Updating a host to an invalid maintenance mode should fail,
+ # 'maintenance_mode' can only be 'enable' or 'disable'.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
@@ -87,7 +93,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
def test_update_host_without_param(self):
- # 'status' or 'maintenance_mode' needed for host update
+        # Updating a host without parameters should fail,
+ # 'status' or 'maintenance_mode' is needed for host update.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname)
@@ -95,6 +102,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
def test_update_nonexistent_host(self):
+        # Updating a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.update_host,
'nonexistent_hostname',
@@ -104,6 +112,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
def test_startup_nonexistent_host(self):
+        # Starting up a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.startup_host,
'nonexistent_hostname')
@@ -111,6 +120,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
def test_startup_host_with_non_admin_user(self):
+ # Non admin user is not allowed to startup host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.startup_host,
self.hostname)
@@ -118,6 +128,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
def test_shutdown_nonexistent_host(self):
+        # Shutting down a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.shutdown_host,
'nonexistent_hostname')
@@ -125,6 +136,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
def test_shutdown_host_with_non_admin_user(self):
+ # Non admin user is not allowed to shutdown host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.shutdown_host,
self.hostname)
@@ -132,6 +144,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
def test_reboot_nonexistent_host(self):
+        # Rebooting a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.reboot_host,
'nonexistent_hostname')
@@ -139,6 +152,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
def test_reboot_host_with_non_admin_user(self):
+ # Non admin user is not allowed to reboot host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reboot_host,
self.hostname)
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 9822c26..e45aac5 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -134,6 +134,7 @@
@decorators.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
+        # Searching for a hypervisor by its name.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers, "No hypervisors found.")
hypers = self.client.search_hypervisor(
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index 0056376..723b93c 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -40,8 +40,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c136086a-0f67-4b2b-bc61-8482bd68989f')
def test_show_nonexistent_hypervisor(self):
+        # Showing a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
-
self.assertRaises(
lib_exc.NotFound,
self.client.show_hypervisor,
@@ -50,6 +50,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('51e663d0-6b89-4817-a465-20aca0667d03')
def test_show_hypervisor_with_non_admin_user(self):
+ # Non admin user is not allowed to show hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -61,6 +62,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e2b061bb-13f9-40d8-9d6e-d5bf17595849')
def test_get_hypervisor_stats_with_non_admin_user(self):
+ # Non admin user is not allowed to get hypervisor stats.
self.assertRaises(
lib_exc.Forbidden,
self.non_adm_client.show_hypervisor_statistics)
@@ -68,6 +70,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f60aa680-9a3a-4c7d-90e1-fae3a4891303')
def test_get_nonexistent_hypervisor_uptime(self):
+        # Getting uptime of a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
self.assertRaises(
@@ -78,6 +81,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6c3461f9-c04c-4e2a-bebb-71dc9cb47df2')
def test_get_hypervisor_uptime_with_non_admin_user(self):
+ # Non admin user is not allowed to get hypervisor uptime.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -97,7 +101,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dc02db05-e801-4c5f-bc8e-d915290ab345')
def test_get_hypervisor_list_details_with_non_admin_user(self):
- # List of hypervisor details and available services with non admin user
+ # Non admin user is not allowed to list hypervisor details.
self.assertRaises(
lib_exc.Forbidden,
self.non_adm_client.list_hypervisors, detail=True)
@@ -109,6 +113,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('2a0a3938-832e-4859-95bf-1c57c236b924')
def test_show_servers_with_non_admin_user(self):
+ # Non admin user is not allowed to show servers on hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -120,6 +125,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02463d69-0ace-4d33-a4a8-93d7883a2bba')
def test_show_servers_with_nonexistent_hypervisor(self):
+        # Showing servers on a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
self.assertRaises(
@@ -130,6 +136,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5b6a6c79-5dc1-4fa5-9c58-9c8085948e74')
def test_search_hypervisor_with_non_admin_user(self):
+ # Non admin user is not allowed to search hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -141,6 +148,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19a45cc1-1000-4055-b6d2-28e8b2ec4faa')
def test_search_nonexistent_hypervisor(self):
+        # Searching for a nonexistent hypervisor should fail.
self.assertRaises(
lib_exc.NotFound,
self.client.search_hypervisor,
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index 24ea8a1..40ed532 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -56,7 +56,7 @@
self.assertEqual(first_keyname, keypair_detail['name'])
self.assertEqual(user_id, keypair_detail['user_id'],
"The fetched keypair is not for requested user!")
- # Create a admin keypair
+ # Create an admin keypair
admin_keypair = self.create_keypair(keypair_type='ssh',
client=self.client)
admin_keypair.pop('private_key', None)
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 5a60dc6..a845c72 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -30,6 +30,14 @@
class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
+ """Test live migration operations supported by admin user"""
+
+ # These tests don't attempt any SSH validation nor do they use
+ # floating IPs on the instance, so all we need is a network and
+ # a subnet so the instance being migrated has a single port, but
+ # we need that to make sure we are properly updating the port
+ # host bindings during the live migration.
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -44,18 +52,6 @@
"Less than 2 compute nodes, skipping migration test.")
@classmethod
- def setup_credentials(cls):
- # These tests don't attempt any SSH validation nor do they use
- # floating IPs on the instance, so all we need is a network and
- # a subnet so the instance being migrated has a single port, but
- # we need that to make sure we are properly updating the port
- # host bindings during the live migration.
- # TODO(mriedem): SSH validation before and after the instance is
- # live migrated would be a nice test wrinkle addition.
- cls.set_network_resources(network=True, subnet=True)
- super(LiveMigrationTestBase, cls).setup_credentials()
-
- @classmethod
def setup_clients(cls):
super(LiveMigrationTestBase, cls).setup_clients()
cls.admin_migration_client = cls.os_admin.migrations_client
@@ -128,12 +124,14 @@
@decorators.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
def test_live_block_migration(self):
+ """Test live migrating an active server"""
self._test_live_migration()
@decorators.idempotent_id('1e107f21-61b2-4988-8f22-b196e938ab88')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
def test_live_block_migration_paused(self):
+ """Test live migrating a paused server"""
self._test_live_migration(state='PAUSED')
@testtools.skipUnless(CONF.compute_feature_enabled.
@@ -142,6 +140,7 @@
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@utils.services('volume')
def test_volume_backed_live_migration(self):
+ """Test live migrating an active server booted from volume"""
self._test_live_migration(volume_backed=True)
@decorators.idempotent_id('e19c0cc6-6720-4ed8-be83-b6603ed5c812')
@@ -153,6 +152,7 @@
'Block Live migration not configured for iSCSI')
@utils.services('volume')
def test_iscsi_volume(self):
+ """Test live migrating a server with volume attached"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
target_host = self.get_host_other_than(server_id)
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 83f2e61..37f5aec 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -25,6 +25,7 @@
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+ """Test migration operations supported by admin user"""
@classmethod
def setup_clients(cls):
@@ -33,14 +34,14 @@
@decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
def test_list_migrations(self):
- # Admin can get the migrations list
+ """Test admin user can get the migrations list"""
self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
- # Admin can get the migrations list which contains the resized server
+ """Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
@@ -62,8 +63,11 @@
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
- # Tests that we can revert the resize on an instance whose original
- # flavor has been deleted.
+ """Test reverting resized server with original flavor deleted
+
+ Tests that we can revert the resize on an instance whose original
+ flavor has been deleted.
+ """
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
@@ -137,10 +141,12 @@
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
+ """Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
+ """Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index 99907a8..fb6376e 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -24,7 +24,7 @@
"""Tests Nova Networks API that usually requires admin privileges.
API docs:
- https://developer.openstack.org/api-ref/compute/#networks-os-networks-deprecated
+ https://docs.openstack.org/api-ref/compute/#networks-os-networks-deprecated
"""
max_microversion = '2.35'
@@ -35,6 +35,7 @@
@decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
def test_get_network(self):
+ """Test getting network from nova side"""
networks = self.client.list_networks()['networks']
if CONF.compute.fixed_network_name:
configured_network = [x for x in networks if x['label'] ==
@@ -56,6 +57,7 @@
@decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
def test_list_all_networks(self):
+ """Test getting all networks from nova side"""
networks = self.client.list_networks()['networks']
# Check the configured network is in the list
if CONF.compute.fixed_network_name:
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 12c7255..0060ffe 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -212,7 +212,7 @@
# 'danger' flag.
@decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
def test_update_default_quotas(self):
- LOG.debug("get the current 'default' quota class values")
+ # get the current 'default' quota class values
body = (self.adm_client.show_quota_class_set('default')
['quota_class_set'])
self.assertEqual('default', body.pop('id'))
@@ -224,9 +224,14 @@
# there is a real chance that we go from -1 (unlimited)
# to a very small number which causes issues.
body[quota] = default + 100
- LOG.debug("update limits for the default quota class set")
+ # update limits for the default quota class set
update_body = self.adm_client.update_quota_class_set(
'default', **body)['quota_class_set']
- LOG.debug("assert that the response has all of the changed values")
+ # assert that the response has all of the changed values
self.assertThat(update_body.items(),
matchers.ContainsAll(body.items()))
+ # check quota values are changed
+ show_body = self.adm_client.show_quota_class_set(
+ 'default')['quota_class_set']
+ self.assertThat(show_body.items(),
+ matchers.ContainsAll(body.items()))
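
The new assertions above fetch the quota class set again after the update and compare it against the requested values with testtools' ContainsAll matcher. A standalone sketch of that check (not part of the patch), assuming testtools is installed and using made-up quota values in place of a real API response:

from testtools import matchers

requested = {'instances': 110, 'cores': 120, 'ram': 51300}
# Pretend this is the body returned by show_quota_class_set('default');
# the extra keys stand in for fields the API adds on top of what was set.
shown = dict(requested, id='default', key_pairs=100)

# ContainsAll passes when every (key, value) pair of `requested` is present
# in `shown`, no matter how many extra items the shown body carries.
mismatch = matchers.ContainsAll(requested.items()).match(shown.items())
assert mismatch is None, mismatch.describe()
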
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
deleted file mode 100644
index bca6a22..0000000
--- a/tempest/api/compute/admin/test_security_group_default_rules.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-
-from tempest.api.compute import base
-from tempest import config
-from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
-
-CONF = config.CONF
-
-
-class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
- max_microversion = '2.35'
-
- @classmethod
- # TODO(GMann): Once Bug# 1311500 is fixed, these test can run
- # for Neutron also.
- @testtools.skipIf(CONF.service_available.neutron,
- "Skip as this functionality is not yet "
- "implemented in Neutron. Related Bug#1311500")
- def setup_credentials(cls):
- # A network and a subnet will be created for these tests
- cls.set_network_resources(network=True, subnet=True)
- super(SecurityGroupDefaultRulesTest, cls).setup_credentials()
-
- @classmethod
- def setup_clients(cls):
- super(SecurityGroupDefaultRulesTest, cls).setup_clients()
- cls.adm_client = cls.os_admin.security_group_default_rules_client
-
- def _create_security_group_default_rules(self, ip_protocol='tcp',
- from_port=22, to_port=22,
- cidr='10.10.0.0/24'):
- # Create Security Group default rule
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port,
- cidr=cidr)['security_group_default_rule']
- self.assertEqual(ip_protocol, rule['ip_protocol'])
- self.assertEqual(from_port, rule['from_port'])
- self.assertEqual(to_port, rule['to_port'])
- self.assertEqual(cidr, rule['ip_range']['cidr'])
- return rule
-
- @decorators.idempotent_id('6d880615-eec3-4d29-97c5-7a074dde239d')
- def test_create_delete_security_group_default_rules(self):
- # Create and delete Security Group default rule
- ip_protocols = ['tcp', 'udp', 'icmp']
- for ip_protocol in ip_protocols:
- rule = self._create_security_group_default_rules(ip_protocol)
- # Delete Security Group default rule
- self.adm_client.delete_security_group_default_rule(rule['id'])
- self.assertRaises(lib_exc.NotFound,
- self.adm_client.show_security_group_default_rule,
- rule['id'])
-
- @decorators.idempotent_id('4d752e0a-33a1-4c3a-b498-ff8667ca22e5')
- def test_create_security_group_default_rule_without_cidr(self):
- ip_protocol = 'udp'
- from_port = 80
- to_port = 80
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port)['security_group_default_rule']
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- self.assertNotEqual(0, rule['id'])
- self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
-
- @decorators.idempotent_id('29f2d218-69b0-4a95-8f3d-6bd0ef732b3a')
- def test_create_security_group_default_rule_with_blank_cidr(self):
- ip_protocol = 'icmp'
- from_port = 10
- to_port = 10
- cidr = ''
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port,
- cidr=cidr)['security_group_default_rule']
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- self.assertNotEqual(0, rule['id'])
- self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
-
- @decorators.idempotent_id('6e6de55e-9146-4ae0-89f2-3569586e0b9b')
- def test_security_group_default_rules_list(self):
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- cidr = '10.10.0.0/24'
- rule = self._create_security_group_default_rules(ip_protocol,
- from_port,
- to_port,
- cidr)
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- rules = (self.adm_client.list_security_group_default_rules()
- ['security_group_default_rules'])
- self.assertNotEmpty(rules)
- self.assertIn(rule, rules)
-
- @decorators.idempotent_id('15cbb349-86b4-4f71-a048-04b7ef3f150b')
- def test_default_security_group_default_rule_show(self):
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- cidr = '10.10.0.0/24'
- rule = self._create_security_group_default_rules(ip_protocol,
- from_port,
- to_port,
- cidr)
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- fetched_rule = self.adm_client.show_security_group_default_rule(
- rule['id'])['security_group_default_rule']
- self.assertEqual(rule, fetched_rule)
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index f0178aa..dfa801b 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -73,8 +73,17 @@
# search filter
fetched_list = (self.client.list_security_groups(all_tenants='true')
['security_groups'])
- # Now check if all created Security Groups are present in fetched list
- for sec_group in fetched_list:
- self.assertEqual(sec_group['tenant_id'], client_tenant_id,
- "Failed to get all security groups for "
- "non admin user.")
+ sec_group_id_list = [sg['id'] for sg in fetched_list]
+ # Now check that the 'all_tenants=true' filter for a non-admin user
+ # only returns the security groups created by that user, not the
+ # security groups created by other users.
+ for sec_group in security_group_list:
+ if sec_group['tenant_id'] == client_tenant_id:
+ self.assertIn(sec_group['id'], sec_group_id_list,
+ "Failed to get all security groups for "
+ "non admin user.")
+ else:
+ self.assertNotIn(sec_group['id'], sec_group_id_list,
+ "Non admin user shouldn't get other user's "
+ "security groups.")
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 5cd98f4..f440428 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -23,7 +23,7 @@
class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-
+ """Test creating servers on mutiple nodes with scheduler_hints."""
@classmethod
def resource_setup(cls):
super(ServersOnMultiNodesTest, cls).resource_setup()
@@ -65,6 +65,7 @@
compute.is_scheduler_filter_enabled("SameHostFilter"),
'SameHostFilter is not available.')
def test_create_servers_on_same_host(self):
+ """Test creating servers with hints 'same_host'"""
hints = {'same_host': self.server01}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
@@ -76,6 +77,7 @@
compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts(self):
+ """Test creating servers with hints of single 'different_host'"""
hints = {'different_host': self.server01}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
@@ -87,7 +89,7 @@
compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts_with_list_of_servers(self):
- # This scheduler-hint supports list of servers also.
+ """Test creating servers with hints of a list of 'different_host'"""
hints = {'different_host': [self.server01]}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
@@ -105,7 +107,7 @@
asserts the servers are in the group and on different hosts.
"""
hosts = self._create_servers_with_group('anti-affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertNotEqual(hostnames[0], hostnames[1],
'Servers are on the same host: %s' % hosts)
@@ -120,6 +122,6 @@
asserts the servers are in the group and on same host.
"""
hosts = self._create_servers_with_group('affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertEqual(hostnames[0], hostnames[1],
'Servers are on the different hosts: %s' % hosts)
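
The list() wrapping above is a Python 3 compatibility fix: dict.values() returns a view object there, which cannot be indexed. A minimal standalone illustration (not part of the patch):

hosts = {'server-1': 'compute-1', 'server-2': 'compute-2'}

values_view = hosts.values()
try:
    values_view[0]
except TypeError:
    # Python 3: 'dict_values' object is not subscriptable.
    pass

hostnames = list(hosts.values())  # a real list, safe to index and compare
assert hostnames[0] != hostnames[1]
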
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 73e191b..bf846e5 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -28,11 +28,13 @@
@decorators.idempotent_id('5be41ef4-53d1-41cc-8839-5c2a48a1b283')
def test_list_services(self):
+ # Listing nova services
services = self.client.list_services()['services']
self.assertNotEmpty(services)
@decorators.idempotent_id('f345b1ec-bc6e-4c38-a527-3ca2bc00bef5')
def test_get_service_by_service_binary_name(self):
+ # Listing nova services by binary name.
binary_name = 'nova-compute'
services = self.client.list_services(binary=binary_name)['services']
self.assertNotEmpty(services)
@@ -41,6 +43,7 @@
@decorators.idempotent_id('affb42d5-5b4b-43c8-8b0b-6dca054abcca')
def test_get_service_by_host_name(self):
+ # Listing nova services by host name.
services = self.client.list_services()['services']
host_name = services[0]['host']
services_on_host = [service for service in services if
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index d264829..033caa8 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -31,14 +31,18 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1126d1f8-266e-485f-a687-adc547492646')
def test_list_services_with_non_admin_user(self):
+ """Non admin user is not allowed to list nova services"""
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_services)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d0884a69-f693-4e79-a9af-232d15643bf7')
def test_get_service_by_invalid_params(self):
- # Expect all services to be returned when the request contains invalid
- # parameters.
+ """Test listing services by invalid filter should return all services
+
+ Expect all services to be returned when the request contains invalid
+ parameters.
+ """
services = self.client.list_services()['services']
services_xxx = (self.client.list_services(xxx='nova-compute')
['services'])
@@ -47,6 +51,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1e966d4a-226e-47c7-b601-0b18a27add54')
def test_get_service_by_invalid_service_and_valid_host(self):
+ """Test listing services by invalid service and valid host value"""
services = self.client.list_services()['services']
host_name = services[0]['host']
services = self.client.list_services(host=host_name,
@@ -56,6 +61,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('64e7e7fb-69e8-4cb6-a71d-8d5eb0c98655')
def test_get_service_with_valid_service_and_invalid_host(self):
+ """Test listing services by valid service and invalid host value"""
services = self.client.list_services()['services']
binary_name = services[0]['binary']
services = self.client.list_services(host='xxx',
@@ -79,6 +85,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('508671aa-c929-4479-bd10-8680d40dd0a6')
def test_enable_service_with_invalid_service_id(self):
+ """Test updating non existing service to status enabled"""
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
service_id=self.fake_service_id,
@@ -87,6 +94,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a9eeeade-42b3-419f-87aa-c9342aa068cf')
def test_disable_service_with_invalid_service_id(self):
+ """Test updating non existing service to status disabled"""
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
service_id=self.fake_service_id,
@@ -95,6 +103,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f46a9d91-1e85-4b96-8e7a-db7706fa2e9a')
def test_disable_log_reason_with_invalid_service_id(self):
+ """Test updating non existing service to disabled with reason"""
+
# disabled_reason requires that status='disabled' be provided.
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index a853182..edcb1a7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -23,6 +23,7 @@
class TestVolumeSwapBase(base.BaseV2ComputeAdminTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -70,6 +71,7 @@
"""The test suite for swapping of volume with admin user.
The following is the scenario outline:
+
1. Create a volume "volume1" with non-admin.
2. Create a volume "volume2" with non-admin.
3. Boot an instance "instance1" with non-admin.
@@ -141,12 +143,25 @@
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ @classmethod
+ def setup_clients(cls):
+ super(TestMultiAttachVolumeSwap, cls).setup_clients()
+ # Need this to set readonly volumes.
+ cls.admin_volumes_client = cls.os_admin.volumes_client_latest
+
# NOTE(mriedem): This is an uncommon scenario to call the compute API
# to swap volumes directly; swap volume is primarily only for volume
# live migration and retype callbacks from the volume service, and is slow
# so it's marked as such.
@decorators.attr(type='slow')
@decorators.idempotent_id('e8f8f9d1-d7b7-4cd2-8213-ab85ef697b6e')
+ # For some reason this test intermittently fails on teardown when there are
+ # multiple compute nodes and the servers are split across the computes.
+ # For now, just skip this test if there are multiple computes.
+ # Alternatively we could put the servers in an affinity group if there are
+ # multiple computes but that would just side-step the underlying bug.
+ @decorators.skip_because(bug='1807723',
+ condition=CONF.compute.min_compute_nodes > 1)
@utils.services('volume')
def test_volume_swap_with_multiattach(self):
# Create two volumes.
@@ -154,6 +169,13 @@
# volumes cleanup can happen successfully irrespective of which volume
# is attached to server.
volume1 = self.create_volume(multiattach=True)
+ # Make volume1 read-only since you can't swap from a volume with
+ # multiple read/write attachments, and you can't change the readonly
+ # flag on an in-use volume so we have to do this before attaching
+ # volume1 to anything. If the compute API ever supports per-attachment
+ # attach modes, then we can handle this differently.
+ self.admin_volumes_client.update_volume_readonly(
+ volume1['id'], readonly=True)
volume2 = self.create_volume(multiattach=True)
# Create two servers and wait for them to be ACTIVE.
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 4a7f36f..7b0f48b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -13,6 +13,7 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -22,6 +23,7 @@
class VolumesAdminNegativeTest(base.BaseV2ComputeAdminTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -57,3 +59,66 @@
self.admin_servers_client.update_attached_volume,
self.server['id'], volume['id'],
volumeId=nonexistent_volume)
+
+
+class UpdateMultiattachVolumeNegativeTest(base.BaseV2ComputeAdminTest):
+
+ min_microversion = '2.60'
+ volume_min_microversion = '3.27'
+
+ @classmethod
+ def skip_checks(cls):
+ super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.volume_multiattach:
+ raise cls.skipException('Volume multi-attach is not available.')
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
+ @utils.services('volume')
+ def test_multiattach_rw_volume_update_failure(self):
+
+ # Create two multiattach capable volumes.
+ vol1 = self.create_volume(multiattach=True)
+ vol2 = self.create_volume(multiattach=True)
+
+ # Create two instances.
+ server1 = self.create_test_server(wait_until='ACTIVE')
+ server2 = self.create_test_server(wait_until='ACTIVE')
+
+ # Attach vol1 to both of these instances.
+ vol1_attachment1 = self.attach_volume(server1, vol1)
+ vol1_attachment2 = self.attach_volume(server2, vol1)
+
+ # Assert that we now have two attachments.
+ vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+ self.assertEqual(2, len(vol1['attachments']))
+
+ # By default both of these attachments should have an attach_mode of
+ # read-write, assert that here to ensure the following calls to update
+ # the volume will be rejected.
+ for volume_attachment in vol1['attachments']:
+ attachment_id = volume_attachment['attachment_id']
+ attachment = self.attachments_client.show_attachment(
+ attachment_id)['attachment']
+ self.assertEqual('rw', attachment['attach_mode'])
+
+ # Assert that a BadRequest is raised when we attempt to update volume1
+ # to volume2 on server1 or server2.
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_servers_client.update_attached_volume,
+ server1['id'], vol1['id'], volumeId=vol2['id'])
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_servers_client.update_attached_volume,
+ server2['id'], vol1['id'], volumeId=vol2['id'])
+
+ # Fetch the volume 1 to check the current attachments.
+ vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+ vol1_attachment_ids = [a['id'] for a in vol1['attachments']]
+
+ # Assert that volume 1 is still attached to both server 1 and 2.
+ self.assertIn(vol1_attachment1['id'], vol1_attachment_ids)
+ self.assertIn(vol1_attachment2['id'], vol1_attachment_ids)
+
+ # Assert that volume 2 has no attachments.
+ vol2 = self.volumes_client.show_volume(vol2['id'])['volume']
+ self.assertEqual([], vol2['attachments'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 4e2b59b..74570ce 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -17,11 +17,11 @@
from oslo_log import log as logging
-from tempest.api.compute import api_microversion_fixture
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest import exceptions
+from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
@@ -39,6 +39,9 @@
"""Base test case class for all Compute API tests."""
force_tenant_isolation = False
+ # Set this to True in subclasses to create a default network. See
+ # https://bugs.launchpad.net/tempest/+bug/1844568
+ create_default_network = False
# TODO(andreaf) We should care also for the alt_manager here
# but only once client lazy load in the manager is done
@@ -49,16 +52,22 @@
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
- cfg_min_version = CONF.compute.min_microversion
- cfg_max_version = CONF.compute.max_microversion
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- cfg_min_version,
- cfg_max_version)
+ api_version_utils.check_skip_with_microversion(
+ cls.min_microversion, cls.max_microversion,
+ CONF.compute.min_microversion, CONF.compute.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.volume_min_microversion, cls.volume_max_microversion,
+ CONF.volume.min_microversion, CONF.volume.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.placement_min_microversion, cls.placement_max_microversion,
+ CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
- cls.set_network_resources()
+ # Setting network=True, subnet=True creates a default network
+ cls.set_network_resources(
+ network=cls.create_default_network,
+ subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
@@ -99,6 +108,8 @@
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
+ cls.attachments_client = cls.os_primary.attachments_client_latest
+ cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
@@ -145,6 +156,14 @@
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
+ cls.volume_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.volume_min_microversion,
+ CONF.volume.min_microversion))
+ cls.placement_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.placement_min_microversion,
+ CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
@@ -209,7 +228,7 @@
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
- validation_resources=None, **kwargs):
+ validation_resources=None, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
@@ -221,6 +240,7 @@
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
+ :param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
"""
@@ -237,8 +257,11 @@
not tenant_network):
kwargs['networks'] = 'none'
+ if clients is None:
+ clients = cls.os_primary
+
body, servers = compute.create_test_server(
- cls.os_primary,
+ clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
@@ -249,11 +272,11 @@
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
- cls.servers_client, server['id'])
+ clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
- cls.servers_client.delete_server, server['id'])
+ clients.servers_client.delete_server, server['id'])
return body
@@ -457,7 +480,7 @@
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
- raise exceptions.InvalidParam(invalid_param=msg)
+ raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
@@ -470,14 +493,16 @@
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- self.request_microversion))
+ compute_microversion=self.request_microversion,
+ volume_microversion=self.volume_request_microversion,
+ placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
- :**kwargs: other parameters to create volume.
+ :param kwargs: other parameters to create volume.
:returns: The available volume.
"""
if 'size' not in kwargs:
@@ -487,6 +512,9 @@
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
@@ -533,11 +561,17 @@
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
- # On teardown detach the volume and wait for it to be available. This
- # is so we don't error out when trying to delete the volume during
- # teardown.
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.volumes_client, volume['id'], 'available')
+ # On teardown detach the volume and for multiattach volumes wait for
+ # the attachment to be removed. For non-multiattach volumes wait for
+ # the state of the volume to change to available. This is so we don't
+ # error out when trying to delete the volume during teardown.
+ if volume['multiattach']:
+ self.addCleanup(waiters.wait_for_volume_attachment_remove,
+ self.volumes_client, volume['id'],
+ attachment['id'])
+ else:
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume['id'], 'available')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
@@ -545,6 +579,25 @@
volume['id'], 'in-use')
return attachment
+ def create_volume_snapshot(self, volume_id, name=None, description=None,
+ metadata=None, force=False):
+ name = name or data_utils.rand_name(
+ self.__class__.__name__ + '-snapshot')
+ snapshot = self.snapshots_client.create_snapshot(
+ volume_id=volume_id,
+ force=force,
+ display_name=name,
+ description=description,
+ metadata=metadata)['snapshot']
+ self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
+ snapshot['id'])
+ self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
+ snapshot = self.snapshots_client.show_snapshot(
+ snapshot['id'])['snapshot']
+ return snapshot
+
def assert_flavor_equal(self, flavor_id, server_flavor):
"""Check whether server_flavor equals to flavor.
@@ -603,8 +656,14 @@
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
- hosts = [svc['host'] for svc in svcs
- if svc['state'] == 'up' and svc['status'] == 'enabled']
+ hosts = []
+ for svc in svcs:
+ if svc['state'] == 'up' and svc['status'] == 'enabled':
+ if CONF.compute.compute_volume_common_az:
+ if svc['zone'] == CONF.compute.compute_volume_common_az:
+ hosts.append(svc['host'])
+ else:
+ hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
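
The updated get_host_other_than() logic in base.py above keeps only enabled, up nova-compute services and, when compute_volume_common_az is configured, only hosts in that availability zone. A standalone sketch of that selection (not part of the patch), with made-up service records:

def candidate_hosts(services, common_az=None):
    # Mirror of the filtering added above: state/status first, then an
    # optional availability-zone constraint.
    hosts = []
    for svc in services:
        if svc['state'] == 'up' and svc['status'] == 'enabled':
            if common_az is None or svc['zone'] == common_az:
                hosts.append(svc['host'])
    return hosts

services = [
    {'host': 'cmp-1', 'state': 'up', 'status': 'enabled', 'zone': 'nova'},
    {'host': 'cmp-2', 'state': 'down', 'status': 'enabled', 'zone': 'nova'},
    {'host': 'cmp-3', 'state': 'up', 'status': 'enabled', 'zone': 'az2'},
]
assert candidate_hosts(services) == ['cmp-1', 'cmp-3']
assert candidate_hosts(services, common_az='nova') == ['cmp-1']
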
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 3a474e6..235049a 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -70,9 +70,7 @@
self.assertEqual(min_img_ram, image['min_ram'])
# Try to create server with flavor of insufficient ram size
- self.assertRaisesRegex(lib_exc.BadRequest,
- "Flavor's memory is too small for "
- "requested image",
- self.create_test_server,
- image_id=image['id'],
- flavor=flavor['id'])
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_test_server,
+ image_id=image['id'],
+ flavor=flavor['id'])
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index c8221c2..ef33685 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -12,17 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-import testtools
+from tempest.lib import exceptions as lib_exceptions
CONF = config.CONF
class ImagesTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -47,12 +50,27 @@
def test_delete_saving_image(self):
server = self.create_test_server(wait_until='ACTIVE')
self.addCleanup(self.servers_client.delete_server, server['id'])
- image = self.create_image_from_server(server['id'],
- wait_until='SAVING')
- self.client.delete_image(image['id'])
- msg = ('The image with ID {image_id} failed to be deleted'
- .format(image_id=image['id']))
- self.assertTrue(self.client.is_resource_deleted(image['id']), msg)
+ # wait for server active to avoid conflict when deleting server
+ # in task_state image_snapshot
+ self.addCleanup(waiters.wait_for_server_status, self.servers_client,
+ server['id'], 'ACTIVE')
+ snapshot_name = data_utils.rand_name('test-snap')
+ try:
+ image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='SAVING')
+ self.client.delete_image(image['id'])
+ msg = ('The image with ID {image_id} failed to be deleted'
+ .format(image_id=image['id']))
+ self.assertTrue(self.client.is_resource_deleted(image['id']),
+ msg)
+ self.assertEqual(snapshot_name, image['name'])
+ except lib_exceptions.TimeoutException as ex:
+ # If the timeout is reached, we don't need to check the state,
+ # since the image would no longer be in the 'SAVING' state, and
+ # this test case does not cover any other state transition.
+ # Hence, skip the test.
+ raise self.skipException("This test is skipped because " + str(ex))
@decorators.idempotent_id('aaacd1d0-55a2-4ce8-818a-b5439df8adc9')
def test_create_image_from_stopped_server(self):
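
The reworked test_delete_saving_image above converts a snapshot timeout into a skip rather than a failure. A minimal standalone sketch of the same pattern (not part of the patch) using plain unittest names; Tempest wires the same idea through its own TimeoutException and skipException:

import unittest


class TimeoutException(Exception):
    """Stand-in for tempest.lib.exceptions.TimeoutException."""


class DeleteSavingImageSketch(unittest.TestCase):

    def _image_snapshot_until_saving(self):
        # Placeholder for create_image_from_server(..., wait_until='SAVING');
        # here it always times out so the skip path is exercised.
        raise TimeoutException('image never reached the SAVING state')

    def test_delete_saving_image(self):
        try:
            self._image_snapshot_until_saving()
        except TimeoutException as ex:
            self.skipTest('This test is skipped because ' + str(ex))


if __name__ == '__main__':
    unittest.main()
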
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 3c152c9..b811421 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -15,7 +15,6 @@
from tempest.api.compute import base
from tempest import config
-from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -101,11 +100,5 @@
# will return 400(Bad Request) if we attempt to send a name which has
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
- body = self.compute_images_client.create_image(
- self.server_id, name=utf8_name)
- if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
- image_id = body['image_id']
- else:
- image_id = data_utils.parse_image_id(body.response['location'])
- self.addCleanup(self.client.delete_image, image_id)
+ self.create_image_from_server(self.server_id, name=utf8_name,
+ wait_until='ACTIVE')
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 512c9d2..37f9be3 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -30,6 +30,7 @@
class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
def tearDown(self):
"""Terminate test instances created after a test is executed."""
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index 49125d1..ef69a13 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -24,18 +24,14 @@
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
max_microversion = '2.35'
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(BaseSecurityGroupsTest, cls).skip_checks()
if not utils.get_service_list()['network']:
raise cls.skipException("network service not enabled.")
- @classmethod
- def setup_credentials(cls):
- # A network and a subnet will be created for these tests
- cls.set_network_resources(network=True, subnet=True)
- super(BaseSecurityGroupsTest, cls).setup_credentials()
-
@staticmethod
def generate_random_security_group_id():
if (CONF.service_available.neutron and
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0636ee4..c1af6c7 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -15,6 +15,7 @@
import time
+from oslo_log import log
import six
from tempest.api.compute import base
@@ -23,11 +24,16 @@
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
+LOG = log.getLogger(__name__)
+
class AttachInterfacesTestBase(base.BaseV2ComputeTest):
@@ -38,11 +44,16 @@
raise cls.skipException("Neutron is required")
if not CONF.compute_feature_enabled.interface_attach:
raise cls.skipException("Interface attachment is not available.")
+ if not CONF.validation.run_validation:
+ raise cls.skipException('Validation should be enabled to ensure '
+ 'guest OS is running and capable of '
+ 'processing ACPI events.')
@classmethod
def setup_credentials(cls):
# This test class requires network and subnet
- cls.set_network_resources(network=True, subnet=True)
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
super(AttachInterfacesTestBase, cls).setup_credentials()
@classmethod
@@ -51,14 +62,40 @@
cls.subnets_client = cls.os_primary.subnets_client
cls.ports_client = cls.os_primary.ports_client
+ def _wait_for_validation(self, server, validation_resources):
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.image_ssh_user,
+ self.image_ssh_password,
+ validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+ linux_client.validate_authentication()
+
def _create_server_get_interfaces(self):
- server = self.create_test_server(wait_until='ACTIVE')
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='ACTIVE')
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+ # NOTE(artom) self.create_test_server adds cleanups, but this is
+ # apparently not enough? Add cleanup here.
+ self.addCleanup(self.delete_server, server['id'])
+ self._wait_for_validation(server, validation_resources)
+ try:
+ fip = set([validation_resources['floating_ip']['ip']])
+ except KeyError:
+ fip = ()
ifs = (self.interfaces_client.list_interfaces(server['id'])
['interfaceAttachments'])
body = waiters.wait_for_interface_status(
self.interfaces_client, server['id'], ifs[0]['port_id'], 'ACTIVE')
ifs[0]['port_state'] = body['port_state']
- return server, ifs
+ return server, ifs, fip
class AttachInterfacesTestJSON(AttachInterfacesTestBase):
@@ -121,7 +158,9 @@
def _test_create_interface_by_port_id(self, server, ifs):
network_id = ifs[0]['net_id']
- port = self.ports_client.create_port(network_id=network_id)
+ port = self.ports_client.create_port(
+ network_id=network_id,
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = port['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
iface = self.interfaces_client.create_interface(
@@ -143,7 +182,9 @@
iface = self.interfaces_client.create_interface(
server['id'], net_id=network_id,
fixed_ips=fixed_ips)['interfaceAttachment']
- self.addCleanup(self.ports_client.delete_port, iface['port_id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port,
+ iface['port_id'])
self._check_interface(iface, server_id=server['id'],
fixed_ip=ip_list[0])
return iface
@@ -189,7 +230,7 @@
@decorators.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
@utils.services('network')
def test_create_list_show_delete_interfaces_by_network_port(self):
- server, ifs = self._create_server_get_interfaces()
+ server, ifs, _ = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
@@ -228,23 +269,13 @@
if not (CONF.auth.use_dynamic_credentials and
CONF.auth.create_isolated_networks and
not CONF.network.shared_physical_network):
- raise self.skipException("Only owner network supports "
- "creating interface by fixed ip.")
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
- server, ifs = self._create_server_get_interfaces()
+ server, ifs, _ = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- try:
- iface = self._test_create_interface(server)
- except lib_exc.BadRequest as e:
- msg = ('Multiple possible networks found, use a Network ID to be '
- 'more specific.')
- if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
- raise
- else:
- ifs.append(iface)
-
iface = self._test_create_interface_by_fixed_ips(server, ifs)
ifs.append(iface)
@@ -270,19 +301,35 @@
"""
network = self.get_tenant_network()
network_id = network['id']
- port = self.ports_client.create_port(network_id=network_id)
+ port = self.ports_client.create_port(
+ network_id=network_id,
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = port['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
- # create two servers
- _, servers = compute.create_test_server(
- self.os_primary, tenant_network=network,
- wait_until='ACTIVE', min_count=2)
+ # NOTE(artom) We create two servers one at a time because
+ # create_test_server doesn't support multiple validatable servers.
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+
+ def _create_validatable_server():
+ _, servers = compute.create_test_server(
+ self.os_primary, tenant_network=network,
+ wait_until='ACTIVE', validatable=True,
+ validation_resources=validation_resources)
+ return servers[0]
+
+ servers = [_create_validatable_server(), _create_validatable_server()]
+
# add our cleanups for the servers since we bypassed the base class
for server in servers:
self.addCleanup(self.delete_server, server['id'])
for server in servers:
+ # NOTE(mgoddard): Get detailed server to ensure addresses are
+ # present in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+ self._wait_for_validation(server, validation_resources)
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
@@ -302,22 +349,75 @@
@decorators.idempotent_id('c7e0e60b-ee45-43d0-abeb-8596fd42a2f9')
@utils.services('network')
def test_add_remove_fixed_ip(self):
+ # NOTE(zhufl) By default only a project that is admin or the network
+ # owner, or a project with the advsvc role, is authorised to add
+ # interfaces with a fixed IP, so skip this test unless a network is
+ # created for each project.
+ if not (CONF.auth.use_dynamic_credentials and
+ CONF.auth.create_isolated_networks and
+ not CONF.network.shared_physical_network):
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
# Add and Remove the fixed IP to server.
- server, ifs = self._create_server_get_interfaces()
- interface_count = len(ifs)
- self.assertGreater(interface_count, 0)
+ server, ifs, fip = self._create_server_get_interfaces()
+ original_interface_count = len(ifs) # This is the number of ports.
+ self.assertGreater(original_interface_count, 0)
+ # Get the starting list of IPs on the server.
+ addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+ # There should be one entry for the single network mapped to a list of
+ # addresses, which at this point should have at least one entry.
+ # Note that we could start with two addresses depending on how tempest
+ # is configured for using floating IPs.
+ self.assertEqual(1, len(addresses), addresses) # number of networks
+ # Keep track of the original addresses so we can know which IP is new.
+ original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
+ # Make sure the floating IP possibly assigned during
+ # server creation is always present in the set of original ips.
+ original_ips = set(original_ips).union(fip)
+ original_ip_count = len(original_ips)
+ self.assertGreater(original_ip_count, 0, addresses) # at least 1
network_id = ifs[0]['net_id']
+ # Add another fixed IP to the server. This should result in another
+ # fixed IP on the same network (and same port since we only have one
+ # port).
self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
- # Remove the fixed IP from server.
+
+ def _wait_for_ip_change(expected_count):
+ _addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+ _ips = set([addr['addr'] for addr in list(_addresses.values())[0]])
+ # Make sure possible floating ip is always present in the set.
+ _ips = _ips.union(fip)
+ LOG.debug("Wait for change of IPs. All IPs still associated to "
+ "the server %(id)s: %(ips)s",
+ {'id': server['id'], 'ips': _ips})
+ return len(_ips) == expected_count
+
+ # Wait for the ips count to increase by one.
+ if not test_utils.call_until_true(
+ _wait_for_ip_change, CONF.compute.build_timeout,
+ CONF.compute.build_interval, original_ip_count + 1):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to increase.')
+
+ # Remove the fixed IP that we just added.
server_detail = self.os_primary.servers_client.show_server(
server['id'])['server']
# Get the Fixed IP from server.
fixed_ip = None
for ip_set in server_detail['addresses']:
for ip in server_detail['addresses'][ip_set]:
- if ip['OS-EXT-IPS:type'] == 'fixed':
+ if (ip['OS-EXT-IPS:type'] == 'fixed' and
+ ip['addr'] not in original_ips):
fixed_ip = ip['addr']
break
if fixed_ip is not None:
break
self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
+ # Wait for the interface count to decrease by one.
+ if not test_utils.call_until_true(
+ _wait_for_ip_change, CONF.compute.build_timeout,
+ CONF.compute.build_interval, original_ip_count):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to decrease.')
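
The new _wait_for_ip_change polling above is driven by test_utils.call_until_true, which retries a predicate until it succeeds or a timeout elapses. A simplified, standalone re-implementation of that pattern (not part of the patch, for illustration only):

import time


def call_until_true(func, duration, sleep_for, *args):
    """Simplified stand-in for test_utils.call_until_true."""
    end_time = time.time() + duration
    while time.time() < end_time:
        if func(*args):
            return True
        time.sleep(sleep_for)
    return False


def _ip_count_reached(ips, expected_count):
    return len(ips) == expected_count


# Example: the server already has two addresses, so the first check passes,
# while waiting for a third address times out and returns False.
ips = {'10.0.0.3', '10.0.0.7'}
assert call_until_true(_ip_count_reached, 1, 0.1, ips, 2)
assert not call_until_true(_ip_count_reached, 0.3, 0.1, ips, 3)
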
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index 3447d85..d0f53fe 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -14,7 +14,6 @@
# under the License.
import netaddr
-import testtools
from tempest.api.compute import base
from tempest import config
@@ -27,6 +26,12 @@
class ServersTestMultiNic(base.BaseV2ComputeTest):
@classmethod
+ def skip_checks(cls):
+ super(ServersTestMultiNic, cls).skip_checks()
+ if not CONF.service_available.neutron:
+ raise cls.skipException('Neutron service must be available.')
+
+ @classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServersTestMultiNic, cls).setup_credentials()
@@ -53,8 +58,6 @@
return net
@decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
- @testtools.skipUnless(CONF.service_available.neutron,
- 'Neutron service must be available.')
def test_verify_multiple_nics_order(self):
# Verify that the networks order given at the server creation is
# preserved within the server.
@@ -91,8 +94,6 @@
self.assertIn(address, network)
@decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
- @testtools.skipUnless(CONF.service_available.neutron,
- 'Neutron service must be available.')
def test_verify_duplicate_network_nics(self):
# Verify that server creation does not fail when more than one nic
# is created on the same network.
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 0093752..a7db88a 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -26,6 +26,7 @@
class DeleteServersTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
# NOTE: Server creations of each test class should be under 10
# for preventing "Quota exceeded for instances"
@@ -107,11 +108,10 @@
@utils.services('volume')
def test_delete_server_while_in_attached_volume(self):
# Delete a server while a volume is attached to it
- device = '/dev/%s' % CONF.compute.volume_device_name
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
- self.attach_volume(server, volume, device=device)
+ self.attach_volume(server, volume)
self.client.delete_server(server['id'])
waiters.wait_for_server_termination(self.client, server['id'])
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index d40f937..8879369 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+from json import decoder as json_decoder
from oslo_log import log as logging
+from oslo_serialization import jsonutils as json
from tempest.api.compute import base
from tempest.common import utils
@@ -111,7 +112,11 @@
max_microversion = '2.32'
def verify_device_metadata(self, md_json):
- md_dict = json.loads(md_json)
+ try:
+ md_dict = json.loads(md_json)
+ except (json_decoder.JSONDecodeError, TypeError):
+ return False
+
for d in md_dict['devices']:
if d['type'] == 'nic':
if d['mac'] == self.port1['mac_address']:
@@ -176,11 +181,13 @@
# Create ports
self.port1 = self.ports_client.create_port(
network_id=net1['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port1['id'])
self.port2 = self.ports_client.create_port(
network_id=net1['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port2['id'])
@@ -309,7 +316,11 @@
raise cls.skipException('Metadata API must be enabled')
def verify_device_metadata(self, md_json):
- md_dict = json.loads(md_json)
+ try:
+ md_dict = json.loads(md_json)
+ except (json_decoder.JSONDecodeError, TypeError):
+ return False
+
found_devices = [d['tags'][0] for d in md_dict['devices']
if d.get('tags')]
try:
@@ -357,9 +368,14 @@
validation_resources=validation_resources,
config_drive=config_drive_enabled,
name=data_utils.rand_name('device-tagging-server'),
- networks=[{'uuid': self.get_tenant_network()['id']}])
+ networks=[{'uuid': self.get_tenant_network()['id']}],
+ wait_until='ACTIVE')
self.addCleanup(self.delete_server, server['id'])
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+
# Attach tagged nic and volume
interface = self.interfaces_client.create_interface(
server['id'], net_id=net['id'],
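
The try/except blocks added to verify_device_metadata above treat undecodable metadata as "not ready yet", so the caller can retry instead of erroring out while the guest is still booting. A standalone sketch of that guard (not part of the patch), using the stdlib json module rather than oslo_serialization:

import json


def parse_device_metadata(md_json):
    # The patched tests catch (JSONDecodeError, TypeError) because the
    # metadata read may hand back None or a partial document.
    try:
        return json.loads(md_json)
    except (json.JSONDecodeError, TypeError):
        return False


assert parse_device_metadata(None) is False            # nothing read yet
assert parse_device_metadata('not json') is False      # garbled/partial read
assert parse_device_metadata('{"devices": []}') == {'devices': []}
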
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index bc48069..5b8e7ab 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -24,6 +24,7 @@
class ServerDiskConfigTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index b916a42..00837eb 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -19,6 +19,7 @@
class InstanceActionsTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
@@ -54,6 +55,7 @@
class InstanceActionsV221TestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
min_microversion = '2.21'
max_microversion = 'latest'
diff --git a/tempest/api/compute/servers/test_instance_actions_negative.py b/tempest/api/compute/servers/test_instance_actions_negative.py
index 1d3a790..4b5a2c3 100644
--- a/tempest/api/compute/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/servers/test_instance_actions_negative.py
@@ -20,6 +20,7 @@
class InstanceActionsNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 18a78f0..b95db5c 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -20,6 +20,7 @@
class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
@@ -80,7 +81,7 @@
@decorators.idempotent_id('fcdf192d-0f74-4d89-911f-1ec002b822c4')
def test_list_servers_status_non_existing(self):
# When invalid status is specified, up to microversion 2.37,
- # an empty list is returnd, and starting from microversion 2.38,
+ # an empty list is returned, and starting from microversion 2.38,
# a 400 error is returned in that case.
if self.is_requested_microversion_compatible('2.37'):
body = self.client.list_servers(status='non_existing_status')
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 059454d..dcadace 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -19,9 +19,11 @@
class MultipleCreateTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
def test_multiple_create(self):
+ # When creating servers with min_count=2, 2 servers will be created.
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
self.os_primary,
@@ -38,6 +40,8 @@
@decorators.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
def test_multiple_create_with_reservation_return(self):
+ # When creating multiple servers with return_reservation_id=True,
+ # a reservation_id will be returned.
body = self.create_test_server(wait_until='ACTIVE',
min_count=1,
max_count=2,
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 422510f..6bdf83b 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -23,6 +23,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
def test_min_count_less_than_one(self):
+ # Creating server with min_count=0 should fail.
invalid_min_count = 0
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
min_count=invalid_min_count)
@@ -30,6 +31,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
def test_min_count_non_integer(self):
+ # Creating server with non-integer min_count should fail.
invalid_min_count = 2.5
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
min_count=invalid_min_count)
@@ -37,6 +39,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
def test_max_count_less_than_one(self):
+ # Creating server with max_count < 1 should fail.
invalid_max_count = 0
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
max_count=invalid_max_count)
@@ -44,6 +47,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
def test_max_count_non_integer(self):
+ # Creating server with non-integer max_count should fail.
invalid_max_count = 2.5
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
max_count=invalid_max_count)
@@ -51,6 +55,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
def test_max_count_less_than_min_count(self):
+ # Creating server with max_count less than min_count should fail.
min_count = 3
max_count = 2
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 5801db1..68e09e7 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -16,6 +16,7 @@
import struct
import six
+import six.moves.urllib.parse as urlparse
import urllib3
from tempest.api.compute import base
@@ -32,6 +33,7 @@
class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -73,8 +75,9 @@
'initial call: ' + six.text_type(resp.status))
# Do some basic validation to make sure it is an expected HTML document
resp_data = resp.data.decode()
- self.assertIn('<html>', resp_data,
- 'Not a valid html document in the response.')
+ # This is needed to handle cases such as: <html lang="en">
+ self.assertRegex(resp_data, '<html.*>',
+ 'Not a valid html document in the response.')
self.assertIn('</html>', resp_data,
'Not a valid html document in the response.')
# Just try to make sure we got JavaScript back for noVNC, since we
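The switch from assertIn to assertRegex above is needed because some noVNC builds serve a document whose opening tag carries attributes. A standalone sketch (plain unittest, not part of the patch) showing why the regex accepts both forms:

import unittest


class HtmlOpenTagExample(unittest.TestCase):
    """Sketch: '<html.*>' matches both bare and attributed opening tags."""

    def test_html_open_tag_variants(self):
        for resp_data in ('<html><body></body></html>',
                          '<html lang="en"><body></body></html>'):
            # A plain '<html>' substring check would reject the second form.
            self.assertRegex(resp_data, '<html.*>')
            self.assertIn('</html>', resp_data)


if __name__ == '__main__':
    unittest.main()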
@@ -143,7 +146,7 @@
data_length = len(data) if data is not None else 0
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24),
+ data[20:24])[0] + 24),
'Server initialization was not the right format.')
# Since the rest of the data on the screen is arbitrary, we will
# close the socket and end our validation of the data at this point
@@ -151,25 +154,27 @@
# initialization was the right format
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24))
+ data[20:24])[0] + 24))
def _validate_websocket_upgrade(self):
+ """Verify that the websocket upgrade was successful.
+
+ Parses response and ensures that required response
+ fields are present and accurate.
+ (https://tools.ietf.org/html/rfc7231#section-6.2.2)
+ """
+
self.assertTrue(
self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
- b'Protocols\r\n'),
- 'Did not get the expected 101 on the {} call: {}'.format(
- CONF.compute_feature_enabled.vnc_server_header,
+ b'Protocols'),
+ 'Incorrect HTTP return status code: {}'.format(
six.text_type(self._websocket.response)
)
)
- # Since every other server type returns Headers with different case
- # (for example 'nginx'), lowercase must be applied to eliminate issues.
- _desired_header = "server: {0}".format(
- CONF.compute_feature_enabled.vnc_server_header
- ).lower()
+ _required_header = 'upgrade: websocket'
_response = six.text_type(self._websocket.response).lower()
self.assertIn(
- _desired_header,
+ _required_header,
_response,
'Did not get the expected WebSocket HTTP Response.'
)
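The rewritten _validate_websocket_upgrade above no longer pins the proxy-specific Server header; it only checks the 101 status line and the mandatory Upgrade header, lower-casing the response because header case varies between servers. A standalone sketch of that check (the sample response bytes are illustrative, not captured output):

def looks_like_websocket_upgrade(raw_response):
    """Case-insensitive check for a successful WebSocket upgrade."""
    text = raw_response.decode('latin-1').lower()
    return (text.startswith('http/1.1 101 switching protocols') and
            'upgrade: websocket' in text)


sample = (b'HTTP/1.1 101 Switching Protocols\r\n'
          b'Upgrade: websocket\r\n'
          b'Connection: Upgrade\r\n\r\n')
assert looks_like_websocket_upgrade(sample)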
@@ -204,7 +209,18 @@
type='novnc')['console']
self.assertEqual('novnc', body['type'])
# Do the WebSockify HTTP Request to novncproxy with a bad token
- url = body['url'].replace('token=', 'token=bad')
+ parts = urlparse.urlparse(body['url'])
+ qparams = urlparse.parse_qs(parts.query)
+ if 'path' in qparams:
+ qparams['path'] = urlparse.unquote(qparams['path'][0]).replace(
+ 'token=', 'token=bad')
+ elif 'token' in qparams:
+ qparams['token'] = 'bad' + qparams['token'][0]
+ new_query = urlparse.urlencode(qparams)
+ new_parts = urlparse.ParseResult(parts.scheme, parts.netloc,
+ parts.path, parts.params, new_query,
+ parts.fragment)
+ url = urlparse.urlunparse(new_parts)
self._websocket = compute.create_websocket(url)
# Make sure the novncproxy rejected the connection and closed it
data = self._websocket.receive_frame()
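The hunk above replaces a naive string substitution with proper URL handling because the console token can appear either directly as a token= query parameter or URL-encoded inside a path= parameter. A standalone sketch under those assumptions; it uses plain urllib.parse where the patch uses six.moves for py2/py3 compatibility, and the sample URLs are made up:

from urllib import parse as urlparse


def corrupt_console_token(url):
    """Return the console URL with its token deliberately broken."""
    parts = urlparse.urlparse(url)
    qparams = urlparse.parse_qs(parts.query)
    if 'path' in qparams:
        # Token is nested inside an encoded ?path=... value.
        qparams['path'] = urlparse.unquote(qparams['path'][0]).replace(
            'token=', 'token=bad')
    elif 'token' in qparams:
        # Token is passed directly as a query parameter.
        qparams['token'] = 'bad' + qparams['token'][0]
    return urlparse.urlunparse(
        parts._replace(query=urlparse.urlencode(qparams)))


print(corrupt_console_token('http://proxy:6080/vnc_auto.html?token=abc123'))
print(corrupt_console_token('http://proxy:6080/?path=%3Ftoken%3Dabc123'))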
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f3d7476..d477be0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -92,6 +92,7 @@
validatable=True,
validation_resources=validation_resources,
wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, newserver['id'])
# The server's password should be set to the provided password
new_password = 'Newpass1234'
self.client.change_password(newserver['id'], adminPass=new_password)
@@ -288,6 +289,17 @@
self.assertEqual('in-use', vol_after_rebuild['status'])
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
+ if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.ssh_user,
+ password=None,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.client)
+ linux_client.validate_authentication()
def _test_resize_server_confirm(self, server_id, stop=False):
# The server's RAM and disk space should be modified to that of
@@ -331,17 +343,27 @@
def test_resize_volume_backed_server_confirm(self):
# We have to create a new server that is volume-backed since the one
# from setUp is not volume-backed.
- server = self.create_test_server(
- volume_backed=True, wait_until='ACTIVE')
+ kwargs = {'volume_backed': True,
+ 'wait_until': 'ACTIVE'}
+ if CONF.validation.run_validation:
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ kwargs.update({'validatable': True,
+ 'validation_resources': validation_resources})
+ server = self.create_test_server(**kwargs)
+
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+
self._test_resize_server_confirm(server['id'])
+
if CONF.compute_feature_enabled.console_output:
# Now do something interactive with the guest like get its console
# output; we don't actually care about the output,
# just that it doesn't raise an error.
self.client.get_console_output(server['id'])
if CONF.validation.run_validation:
- validation_resources = self.get_class_validation_resources(
- self.os_primary)
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
@@ -405,10 +427,7 @@
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
# Make sure everything still looks OK.
server = self.client.show_server(self.server_id)['server']
- # The flavor id is not returned in the server response after
- # microversion 2.46 so handle that gracefully.
- if server['flavor'].get('id'):
- self.assertEqual(self.flavor_ref, server['flavor']['id'])
+ self.assert_flavor_equal(self.flavor_ref, server['flavor'])
attached_volumes = server['os-extended-volumes:volumes_attached']
self.assertEqual(1, len(attached_volumes))
self.assertEqual(volume['id'], attached_volumes[0]['id'])
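The change above delegates the check to an assert_flavor_equal helper because the flavor id is not returned in the server response after microversion 2.46. The following is only a hedged sketch of how such a helper could branch on the two response shapes, assuming the embedded flavor exposes original_name; it is not the actual Tempest implementation.

def assert_flavor_equal_sketch(testcase, flavors_client, flavor_ref,
                               server_flavor):
    if server_flavor.get('id'):
        # Older-style response: the flavor id is present, compare directly.
        testcase.assertEqual(flavor_ref, server_flavor['id'])
    else:
        # Newer-style response: compare against the looked-up flavor details.
        expected = flavors_client.show_flavor(flavor_ref)['flavor']
        testcase.assertEqual(expected['name'],
                             server_flavor['original_name'])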
@@ -695,16 +714,13 @@
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled.')
def test_get_vnc_console(self):
- # Get the VNC console of type 'novnc' and 'xvpvnc'
- console_types = ['novnc', 'xvpvnc']
- for console_type in console_types:
- if self.is_requested_microversion_compatible('2.5'):
- body = self.client.get_vnc_console(
- self.server_id, type=console_type)['console']
- else:
- body = self.client.get_remote_console(
- self.server_id, console_type=console_type,
- protocol='vnc')['remote_console']
- self.assertEqual(console_type, body['type'])
- self.assertNotEqual('', body['url'])
- self._validate_url(body['url'])
+ if self.is_requested_microversion_compatible('2.5'):
+ body = self.client.get_vnc_console(
+ self.server_id, type='novnc')['console']
+ else:
+ body = self.client.get_remote_console(
+ self.server_id, console_type='novnc',
+ protocol='vnc')['remote_console']
+ self.assertEqual('novnc', body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index f79b05f..c936ce5 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -19,12 +19,7 @@
class ServerAddressesTestJSON(base.BaseV2ComputeTest):
-
- @classmethod
- def setup_credentials(cls):
- # This test module might use a network and a subnet
- cls.set_network_resources(network=True, subnet=True)
- super(ServerAddressesTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_addresses_negative.py b/tempest/api/compute/servers/test_server_addresses_negative.py
index b2b3cc0..f33c6d9 100644
--- a/tempest/api/compute/servers/test_server_addresses_negative.py
+++ b/tempest/api/compute/servers/test_server_addresses_negative.py
@@ -20,11 +20,7 @@
class ServerAddressesNegativeTestJSON(base.BaseV2ComputeTest):
-
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True)
- super(ServerAddressesNegativeTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 1b7cb96..4b5efaa 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -29,6 +29,7 @@
policies = affinity/anti-affinity
It also adds the tests for list and get details of server-groups
"""
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index fe95018..9d87e1c 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -18,6 +18,7 @@
class ServerMetadataTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 482ba09..5688af1 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -20,6 +20,7 @@
class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index e6a668a..7b31ede 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -19,6 +19,7 @@
class ServerPasswordTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index b0ef3bc..5a646f9 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -24,11 +24,12 @@
CONF = config.CONF
-class ServerRescueTestJSON(base.BaseV2ComputeTest):
+class ServerRescueTestBase(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
- super(ServerRescueTestJSON, cls).skip_checks()
+ super(ServerRescueTestBase, cls).skip_checks()
if not CONF.compute_feature_enabled.rescue:
msg = "Server rescue not available."
raise cls.skipException(msg)
@@ -36,11 +37,11 @@
@classmethod
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, router=True)
- super(ServerRescueTestJSON, cls).setup_credentials()
+ super(ServerRescueTestBase, cls).setup_credentials()
@classmethod
def resource_setup(cls):
- super(ServerRescueTestJSON, cls).resource_setup()
+ super(ServerRescueTestBase, cls).resource_setup()
password = data_utils.rand_password()
server = cls.create_test_server(adminPass=password,
@@ -50,6 +51,9 @@
'RESCUE')
cls.rescued_server_id = server['id']
+
+class ServerRescueTestJSON(ServerRescueTestBase):
+
@decorators.idempotent_id('fd032140-714c-42e4-a8fd-adcd8df06be6')
def test_rescue_unrescue_instance(self):
password = data_utils.rand_password()
@@ -62,6 +66,15 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
+
+class ServerRescueTestJSONUnderV235(ServerRescueTestBase):
+
+ max_microversion = '2.35'
+
+ # TODO(zhufl): After 2.35 we should switch to the neutron client to create
+ # the floating ip, but that will need admin credentials, so the test cases
+ # will have to be added somewhere in the admin directory.
+
@decorators.idempotent_id('4842e0cf-e87d-4d9d-b61f-f4791da3cacc')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@@ -91,3 +104,132 @@
# Delete Security group
self.servers_client.remove_security_group(self.rescued_server_id,
name=sg['name'])
+
+
+class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
+ create_default_network = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseServerStableDeviceRescueTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
+ if not CONF.compute_feature_enabled.stable_rescue:
+ msg = "Stable rescue not available."
+ raise cls.skipException(msg)
+
+ def _create_server_and_rescue_image(self, hw_rescue_device=None,
+ hw_rescue_bus=None,
+ block_device_mapping_v2=None):
+ if block_device_mapping_v2:
+ server_id = self.create_test_server(
+ wait_until='ACTIVE',
+ block_device_mapping_v2=block_device_mapping_v2)['id']
+ else:
+ server_id = self.create_test_server(wait_until='ACTIVE')['id']
+
+ image_id = self.create_image_from_server(server_id,
+ wait_until='ACTIVE')['id']
+ if hw_rescue_bus:
+ self.images_client.update_image(
+ image_id, [dict(add='/hw_rescue_bus',
+ value=hw_rescue_bus)])
+ if hw_rescue_device:
+ self.images_client.update_image(
+ image_id, [dict(add='/hw_rescue_device',
+ value=hw_rescue_device)])
+ return server_id, image_id
+
+ def _test_stable_device_rescue(self, server_id, rescue_image_id):
+ self.servers_client.rescue_server(
+ server_id, rescue_image_ref=rescue_image_id)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'RESCUE')
+ self.servers_client.unrescue_server(server_id)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'ACTIVE')
+
+
+class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
+
+ @decorators.idempotent_id('947004c3-e8ef-47d9-9f00-97b74f9eaf96')
+ def test_stable_device_rescue_cdrom_ide(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='cdrom', hw_rescue_bus='ide')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
+ def test_stable_device_rescue_disk_virtio(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
+ def test_stable_device_rescue_disk_scsi(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='scsi')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
+ def test_stable_device_rescue_disk_usb(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='usb')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+ def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio')
+ server = self.servers_client.show_server(server_id)['server']
+ volume = self.create_volume()
+ self.attach_volume(server, volume)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+
+class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
+
+ min_microversion = '2.87'
+
+ @decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
+ def test_stable_device_rescue_bfv_blank_volume(self):
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "blank",
+ "volume_size": CONF.volume.volume_size,
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
+ def test_stable_device_rescue_bfv_image_volume(self):
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "image",
+ "volume_size": CONF.volume.volume_size,
+ "uuid": CONF.compute.image_ref,
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('7fcc5d2c-130e-4750-95f5-7343f9d0a2f3')
+ def test_stable_device_rescue_bfv_snapshot_volume(self):
+ volume_id = self.create_volume()['id']
+ self.volumes_client.set_bootable_volume(volume_id, bootable=True)
+ snapshot_id = self.create_volume_snapshot(volume_id)['id']
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "snapshot",
+ "volume_size": CONF.volume.volume_size,
+ "uuid": snapshot_id,
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
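For readers unfamiliar with the image update calls in _create_server_and_rescue_image above: the Glance v2 image-update API takes a JSON-patch style list of operations, which is how the rescue image gets tagged with the device/bus hints the stable rescue tests rely on. A hedged sketch of that payload, where images_client and image_id stand in for the client and image created in the test:

# JSON-patch style operations adding the stable-rescue hints to an image.
rescue_image_patch = [
    {'add': '/hw_rescue_device', 'value': 'disk'},
    {'add': '/hw_rescue_bus', 'value': 'virtio'},
]
# images_client.update_image(image_id, rescue_image_patch)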
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 1260c6b..caceb64 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -43,7 +43,6 @@
@classmethod
def resource_setup(cls):
super(ServerRescueNegativeTestJSON, cls).resource_setup()
- cls.device = CONF.compute.volume_device_name
cls.password = data_utils.rand_password()
rescue_password = data_utils.rand_password()
# Server for negative tests
@@ -125,8 +124,7 @@
self.assertRaises(lib_exc.Conflict,
self.servers_client.attach_volume,
self.server_id,
- volumeId=volume['id'],
- device='/dev/%s' % self.device)
+ volumeId=volume['id'])
@decorators.idempotent_id('f56e465b-fe10-48bf-b75d-646cda3a8bc9')
@utils.services('volume')
@@ -136,7 +134,7 @@
# Attach the volume to the server
server = self.servers_client.show_server(self.server_id)['server']
- self.attach_volume(server, volume, device='/dev/%s' % self.device)
+ self.attach_volume(server, volume)
# Rescue the server
self.servers_client.rescue_server(self.server_id,
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index 8d0a4e3..3893b01 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -26,6 +26,8 @@
min_microversion = '2.26'
max_microversion = 'latest'
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(ServerTagsTestJSON, cls).skip_checks()
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 56d973e..3a4bd6d 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -19,13 +19,13 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
@@ -40,11 +40,7 @@
# If an admin password is provided on server creation, the server's
# root password should be set to that password.
server = self.create_test_server(adminPass='testpassword')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Verify the password is set correctly in the response
self.assertEqual('testpassword', server['adminPass'])
@@ -59,19 +55,11 @@
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id1 = server['id']
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, id1)
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, id1)
+ self.addCleanup(self.delete_server, id1)
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id2 = server['id']
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, id2)
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, id2)
+ self.addCleanup(self.delete_server, id2)
self.assertNotEqual(id1, id2, "Did not create a new server")
server = self.client.show_server(id1)['server']
name1 = server['name']
@@ -87,13 +75,9 @@
self.keypairs_client.create_keypair(name=key_name)
self.addCleanup(self.keypairs_client.delete_keypair, key_name)
self.keypairs_client.list_keypairs()
- server = self.create_test_server(key_name=key_name)
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
- waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
+ server = self.create_test_server(key_name=key_name,
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server['id'])
server = self.client.show_server(server['id'])['server']
self.assertEqual(key_name, server['key_name'])
@@ -115,11 +99,7 @@
def test_update_server_name(self):
# The server name should be changed to the provided value
server = self.create_test_server(wait_until='ACTIVE')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Update instance name with non-ASCII characters
prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
self._update_server_name(server['id'], 'ACTIVE', prefix_name)
@@ -137,11 +117,7 @@
def test_update_access_server_address(self):
# The server's access addresses should reflect the provided values
server = self.create_test_server(wait_until='ACTIVE')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Update the IPv4 and IPv6 access addresses
self.client.update_server(server['id'],
@@ -157,13 +133,9 @@
@decorators.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
def test_create_server_with_ipv6_addr_only(self):
# Create a server without an IPv4 address(only IPv6 address).
- server = self.create_test_server(accessIPv6='2001:2001::3')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
- waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
+ server = self.create_test_server(accessIPv6='2001:2001::3',
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server['id'])
server = self.client.show_server(server['id'])['server']
self.assertEqual('2001:2001::3', server['accessIPv6'])
@@ -215,10 +187,17 @@
min_microversion = '2.63'
max_microversion = 'latest'
+ @testtools.skipUnless(CONF.compute.certified_image_ref,
+ '``[compute]/certified_image_ref`` required to test '
+ 'image certificate validation.')
+ @testtools.skipUnless(CONF.compute.certified_image_trusted_certs,
+ '``[compute]/certified_image_trusted_certs`` '
+ 'required to test image certificate validation.')
@decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
def test_show_update_rebuild_list_server(self):
- trusted_certs = ['test-cert-1', 'test-cert-2']
+ trusted_certs = CONF.compute.certified_image_trusted_certs
server = self.create_test_server(
+ image_id=CONF.compute.certified_image_ref,
trusted_image_certificates=trusted_certs,
wait_until='ACTIVE')
@@ -231,7 +210,8 @@
server['id'], 'ACTIVE')
# Check rebuild API response schema
- self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+ self.servers_client.rebuild_server(
+ server['id'], CONF.compute.certified_image_ref)
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 6cabf65..7fa30b0 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -30,6 +30,7 @@
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
@@ -554,6 +555,7 @@
class ServersNegativeTestMultiTenantJSON(base.BaseV2ComputeTest):
+ create_default_network = True
credentials = ['primary', 'alt']
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index f810ec5..dfd6ca4 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -32,11 +32,7 @@
depends_on_nova_network = True
- @classmethod
- def setup_credentials(cls):
- # This test needs a network and a subnet
- cls.set_network_resources(network=True, subnet=True)
- super(VirtualInterfacesTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 34faf5f..12e7fea 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -37,7 +37,7 @@
ext = CONF.compute_feature_enabled.api_extensions[0]
# Log extensions list
- extension_list = map(lambda x: x['alias'], extensions)
+ extension_list = [x['alias'] for x in extensions]
LOG.debug("Nova extensions: %s", ','.join(extension_list))
if ext == 'all':
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 811b521..97813a5 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -28,6 +28,7 @@
class BaseAttachVolumeTest(base.BaseV2ComputeTest):
"""Base class for the attach volume tests in this module."""
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -41,11 +42,6 @@
cls.prepare_instance_network()
super(BaseAttachVolumeTest, cls).setup_credentials()
- @classmethod
- def resource_setup(cls):
- super(BaseAttachVolumeTest, cls).resource_setup()
- cls.device = CONF.compute.volume_device_name
-
def _create_server(self):
# Start a server and wait for it to become ready
validation_resources = self.get_test_validation_resources(
@@ -65,6 +61,8 @@
class AttachVolumeTestJSON(BaseAttachVolumeTest):
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
+ # This test is conditionally marked slow if SSH validation is enabled.
+ @decorators.attr(type='slow', condition=CONF.validation.run_validation)
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
@@ -82,10 +80,18 @@
# NOTE(andreaf) We need to ensure the ssh key has been
# injected in the guest before we power cycle
linux_client.validate_authentication()
+ disks_before_attach = linux_client.list_disks()
volume = self.create_volume()
- attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+
+ # NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
+ # no longer honors a user-supplied device name, and there can be
+ # a mismatch between the libvirt-provided disk name and the actual
+ # disk name on the instance. Hence this test no longer validates
+ # against the supplied device name; instead it counts the number of
+ # disks before and after attach/detach to validate the test case.
+
+ attachment = self.attach_volume(server, volume)
self.servers_client.stop_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -96,9 +102,10 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- device_name_to_match = '\n' + self.device + ' '
- self.assertIn(device_name_to_match, disks)
+ disks_after_attach = linux_client.list_disks()
+ self.assertGreater(
+ len(disks_after_attach),
+ len(disks_before_attach))
self.servers_client.detach_volume(server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
@@ -113,16 +120,15 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- self.assertNotIn(device_name_to_match, disks)
+ disks_after_detach = linux_client.list_disks()
+ self.assertEqual(len(disks_before_attach), len(disks_after_detach))
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
# List volume attachment of the server
- server, _ = self._create_server()
+ server, validation_resources = self._create_server()
volume_1st = self.create_volume()
- attachment_1st = self.attach_volume(server, volume_1st,
- device=('/dev/%s' % self.device))
+ attachment_1st = self.attach_volume(server, volume_1st)
body = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
self.assertEqual(1, len(body))
@@ -143,6 +149,16 @@
server['id'])['volumeAttachments']
self.assertEqual(2, len(body))
+ if CONF.validation.run_validation:
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.image_ssh_user,
+ self.image_ssh_password,
+ validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+ linux_client.validate_authentication()
+
for attachment in [attachment_1st, attachment_2nd]:
body = self.servers_client.show_volume_attachment(
server['id'], attachment['id'])['volumeAttachment']
@@ -234,8 +250,7 @@
volume = self.create_volume()
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
- attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ attachment = self.attach_volume(server, volume)
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(
@@ -264,7 +279,7 @@
self._shelve_server(server, validation_resources)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.attach_volume(server, volume)
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
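As the NOTE in AttachVolumeTestJSON above explains, the guest-visible device name can no longer be trusted, so the test now only compares the number of disks the guest reports before and after the attach/detach cycle. A standalone sketch of that comparison, with hypothetical disk names in place of the remote client output:

def check_attach_detach(disks_before, disks_after_attach, disks_after_detach):
    # After attach the guest should report at least one more disk ...
    assert len(disks_after_attach) > len(disks_before)
    # ... and after detach the count should be back where it started.
    assert len(disks_after_detach) == len(disks_before)


# Hypothetical lsblk-style listings from the guest.
check_attach_detach(['vda'], ['vda', 'vdb'], ['vda'])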
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 8618148..9a506af 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -21,6 +21,7 @@
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -35,9 +36,7 @@
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
-
- path = "/dev/%s" % CONF.compute.volume_device_name
- self.attach_volume(server, volume, device=path)
+ self.attach_volume(server, volume)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index b8ca81d..f3ccf8d 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -27,7 +27,7 @@
# These tests will fail with a 404 starting from microversion 2.36. For
# more information, see:
- # https://developer.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
+ # https://docs.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
max_microversion = '2.35'
@classmethod
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index d83d49e..0d23c1f 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -28,7 +28,7 @@
# These tests will fail with a 404 starting from microversion 2.36. For
# more information, see:
- # https://developer.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
+ # https://docs.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
max_microversion = '2.35'
@classmethod
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index b2aebe7..28bc174 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -29,7 +29,7 @@
# These tests will fail with a 404 starting from microversion 2.36. For
# more information, see:
- # https://developer.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
+ # https://docs.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
max_microversion = '2.35'
@classmethod
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index 87f7d8a..444ce93 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -26,7 +26,7 @@
# These tests will fail with a 404 starting from microversion 2.36. For
# more information, see:
- # https://developer.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
+ # https://docs.openstack.org/api-ref/compute/#volume-extension-os-volumes-os-snapshots-deprecated
max_microversion = '2.35'
@classmethod
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index 947706e..236ce7c 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -19,6 +19,7 @@
class EndPointsTestJSON(base.BaseIdentityV2AdminTest):
+ """Test keystone v2 endpoints"""
@classmethod
def resource_setup(cls):
@@ -51,6 +52,7 @@
@decorators.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
def test_list_endpoints(self):
+ """Test listing keystone endpoints"""
# Get a list of endpoints
fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
# Asserting LIST endpoints
@@ -62,6 +64,7 @@
@decorators.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1')
def test_create_list_delete_endpoint(self):
+ """Test creating, listing and deleting a keystone endpoint"""
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = self.endpoints_client.create_endpoint(
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index e2ed5ef..03543ac 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -89,14 +89,10 @@
service = self.services_client.create_service(
name=name, type=s_type,
description=description)['OS-KSADM:service']
+ self.addCleanup(self.services_client.delete_service, service['id'])
services.append(service)
service_ids = [svc['id'] for svc in services]
- def delete_services():
- for service_id in service_ids:
- self.services_client.delete_service(service_id)
-
- self.addCleanup(delete_services)
# List and Verify Services
body = self.services_client.list_services()['OS-KSADM:services']
found = [serv for serv in body if serv['id'] in service_ids]
diff --git a/tempest/api/identity/admin/v2/test_tenants.py b/tempest/api/identity/admin/v2/test_tenants.py
index cda721c..f68754e 100644
--- a/tempest/api/identity/admin/v2/test_tenants.py
+++ b/tempest/api/identity/admin/v2/test_tenants.py
@@ -50,7 +50,7 @@
'been sent in response for create')
body = self.tenants_client.show_tenant(tenant_id)['tenant']
desc2 = body['description']
- self.assertEqual(desc2, tenant_desc, 'Description does not appear'
+ self.assertEqual(desc2, tenant_desc, 'Description does not appear '
'to be set')
self.tenants_client.delete_tenant(tenant_id)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index ba19ff7..23fe788 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -20,6 +20,10 @@
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -27,10 +31,6 @@
cls.projects = list()
cls.creds_list = [['project_id', 'user_id', 'id'],
['access', 'secret']]
- u_name = data_utils.rand_name('user')
- u_desc = '%s description' % u_name
- u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_password()
for _ in range(2):
project = cls.projects_client.create_project(
data_utils.rand_name('project'),
@@ -38,12 +38,8 @@
cls.addClassResourceCleanup(
cls.projects_client.delete_project, project['id'])
cls.projects.append(project['id'])
-
- cls.user_body = cls.users_client.create_user(
- name=u_name, description=u_desc, password=u_password,
- email=u_email, project_id=cls.projects[0])['user']
- cls.addClassResourceCleanup(
- cls.users_client.delete_user, cls.user_body['id'])
+ cls.user_body = cls.users_client.show_user(
+ cls.os_primary.credentials.user_id)['user']
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 302a0e5..7c3a6cc 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.identity import base
from tempest import clients
from tempest import config
@@ -19,7 +21,8 @@
CONF = config.CONF
-class TestDefaultProjectId (base.BaseIdentityV3AdminTest):
+class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
+ """Test creating a token without project will default to user's project"""
@classmethod
def setup_credentials(cls):
@@ -32,8 +35,12 @@
self.domains_client.update_domain(domain_id, enabled=False)
self.domains_client.delete_domain(domain_id)
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
@decorators.idempotent_id('d6110661-6a71-49a7-a453-b5e26640ff6d')
def test_default_project_id(self):
+ """Creating a token without project will default to user's project"""
# create a domain
dom_name = data_utils.rand_name('dom')
domain_body = self.domains_client.create_domain(
@@ -51,9 +58,10 @@
# create a user in the domain, with the previous project as his
# default project
user_name = data_utils.rand_name('user')
+ user_pass = data_utils.rand_password()
user_body = self.users_client.create_user(
name=user_name,
- password=user_name,
+ password=user_pass,
domain_id=dom_id,
default_project_id=proj_id)['user']
user_id = user_body['id']
@@ -72,7 +80,7 @@
# create a new client with user's credentials (NOTE: unscoped token!)
creds = auth.KeystoneV3Credentials(username=user_name,
- password=user_name,
+ password=user_pass,
user_domain_name=dom_name)
auth_provider = clients.get_auth_provider(creds)
creds = auth_provider.fill_credentials()
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index c4e0622..c0b18ca 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -21,6 +21,10 @@
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
custom_config = {
"identity": {
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 72b6be4..07175f4 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -153,18 +153,3 @@
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
self.assertDictContainsSubset(expected_data, domain)
-
-
-class DefaultDomainTestJSON(base.BaseIdentityV3AdminTest):
-
- @classmethod
- def resource_setup(cls):
- cls.domain_id = CONF.identity.default_domain_id
- super(DefaultDomainTestJSON, cls).resource_setup()
-
- @decorators.attr(type='smoke')
- @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
- def test_default_domain_exists(self):
- domain = self.domains_client.show_domain(self.domain_id)['domain']
-
- self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index 56f7d32..b3c68fb 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -20,6 +20,10 @@
class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.attr(type=['negative', 'gate'])
@decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index eef93c2..7d85dc9 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -20,6 +20,10 @@
class EndPointGroupsTest(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
@@ -65,6 +69,7 @@
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
def test_create_list_show_check_delete_endpoint_group(self):
service_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service_id}
@@ -125,6 +130,7 @@
# Creating an endpoint group so as to check update endpoint group
# with new values
service1_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service1_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service1_id}
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 2cd8906..366d6a0 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -44,11 +44,14 @@
cls.addClassResourceCleanup(
cls.services_client.delete_service, service['id'])
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
- url=url, region=region, enabled=True)['endpoint']
+ url=url, region=region_name, enabled=True)['endpoint']
+ region = cls.regions_client.show_region(region_name)['region']
+ cls.addClassResourceCleanup(
+ cls.regions_client.delete_region, region['id'])
cls.addClassResourceCleanup(
cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@@ -108,17 +111,19 @@
@decorators.idempotent_id('0e2446d2-c1fd-461b-a729-b9e73e3e3b37')
def test_create_list_show_delete_endpoint(self):
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
interface = 'public'
endpoint = self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface,
- url=url, region=region,
+ url=url, region=region_name,
enabled=True)['endpoint']
+ region = self.regions_client.show_region(region_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
- self.assertEqual(region, endpoint['region'])
+ self.assertEqual(region_name, endpoint['region'])
self.assertEqual(url, endpoint['url'])
# Checking if created endpoint is present in the list of endpoints
@@ -133,7 +138,7 @@
self.assertEqual(self.service_ids[0], fetched_endpoint['service_id'])
self.assertEqual(interface, fetched_endpoint['interface'])
self.assertEqual(url, fetched_endpoint['url'])
- self.assertEqual(region, fetched_endpoint['region'])
+ self.assertEqual(region_name, fetched_endpoint['region'])
self.assertEqual(True, fetched_endpoint['enabled'])
# Deleting the endpoint created in this method
@@ -161,28 +166,33 @@
self.addCleanup(self.services_client.delete_service, service2['id'])
# Creating an endpoint so as to check update endpoint with new values
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
- self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
# Updating endpoint with new values
- region2 = data_utils.rand_name('region')
+ region2_name = data_utils.rand_name('region')
url2 = data_utils.rand_url()
interface2 = 'internal'
endpoint = self.client.update_endpoint(endpoint_for_update['id'],
service_id=service2['id'],
interface=interface2,
- url=url2, region=region2,
+ url=url2, region=region2_name,
enabled=False)['endpoint']
+ region2 = self.regions_client.show_region(region2_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region2['id'])
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+
# Asserting if the attributes of endpoint are updated
self.assertEqual(service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
- self.assertEqual(region2, endpoint['region'])
+ self.assertEqual(region2_name, endpoint['region'])
self.assertEqual(False, endpoint['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 4c3eb1c..164b577 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -70,14 +70,16 @@
def _assert_update_raises_bad_request(self, enabled):
# Create an endpoint
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_id,
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
self.assertRaises(lib_exc.BadRequest, self.client.update_endpoint,
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 37ce266..2dd1fe2 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
from tempest import config
@@ -22,6 +23,10 @@
class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -68,6 +73,10 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_group_users_add_list_delete(self):
group = self.setup_test_group(domain_id=self.domain['id'])
# add user into group
@@ -90,6 +99,10 @@
self.assertEqual(len(group_users), 0)
@decorators.idempotent_id('64573281-d26a-4a52-b899-503cb0f4e4ec')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_list_user_groups(self):
# create a user
user = self.create_test_user()
@@ -101,6 +114,13 @@
self.groups_client.add_group_user(group['id'], user['id'])
# list groups which user belongs to
user_groups = self.users_client.list_user_groups(user['id'])['groups']
+ # The `membership_expires_at` attribute is present when listing user
+ # group memberships, and is not an attribute of the groups themselves.
+ # Therefore we remove it from the comparison.
+ for g in user_groups:
+ if 'membership_expires_at' in g:
+ self.assertIsNone(g['membership_expires_at'])
+ del(g['membership_expires_at'])
self.assertEqual(sorted(groups, key=lambda k: k['name']),
sorted(user_groups, key=lambda k: k['name']))
self.assertEqual(2, len(user_groups))
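The loop added above strips membership_expires_at because that key only appears when listing a user's group memberships, not on the group objects themselves, so a direct comparison of the two sorted lists would otherwise fail. A standalone sketch with made-up group data:

groups = [{'id': 'g1', 'name': 'alpha'}, {'id': 'g2', 'name': 'beta'}]
user_groups = [{'id': 'g2', 'name': 'beta', 'membership_expires_at': None},
               {'id': 'g1', 'name': 'alpha', 'membership_expires_at': None}]

# Drop the membership-only attribute before comparing the listings.
for g in user_groups:
    g.pop('membership_expires_at', None)

assert (sorted(groups, key=lambda k: k['name']) ==
        sorted(user_groups, key=lambda k: k['name']))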
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 68c0225..2672f71 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -9,14 +9,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
from tempest.common import utils
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class InheritsV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def skip_checks(cls):
@@ -30,7 +38,7 @@
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_name('pass-')
+ u_password = data_utils.rand_password()
cls.domain = cls.create_domain()
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project-'),
@@ -43,18 +51,26 @@
domain_id=cls.domain['id'])['group']
cls.addClassResourceCleanup(cls.groups_client.delete_group,
cls.group['id'])
- cls.user = cls.users_client.create_user(
- name=u_name, description=u_desc, password=u_password,
- email=u_email, project_id=cls.project['id'],
- domain_id=cls.domain['id'])['user']
- cls.addClassResourceCleanup(cls.users_client.delete_user,
- cls.user['id'])
+ if not CONF.identity_feature_enabled.immutable_user_source:
+ cls.user = cls.users_client.create_user(
+ name=u_name,
+ description=u_desc,
+ password=u_password,
+ email=u_email,
+ project_id=cls.project['id'],
+ domain_id=cls.domain['id']
+ )['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
@decorators.idempotent_id('4e6f0366-97c8-423c-b2be-41eae6ac91c8')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
# Create role
src_role = self.setup_test_role()
@@ -103,6 +119,9 @@
self.domain['id'], self.group['id'], src_role['id'])
@decorators.idempotent_id('18b70e45-7687-4b72-8277-b8f1a47d7591')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_check_revoke_roles_on_projects_user(self):
# Create role
src_role = self.setup_test_role()
@@ -134,6 +153,9 @@
self.project['id'], self.group['id'], src_role['id']))
@decorators.idempotent_id('3acf666e-5354-42ac-8e17-8b68893bcd36')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_domain(self):
# Create role
src_role = self.setup_test_role()
@@ -178,6 +200,9 @@
self.assertEmpty(assignments)
@decorators.idempotent_id('9f02ccd9-9b57-46b4-8f77-dd5a736f3a06')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_project_tree(self):
# Create role
src_role = self.setup_test_role()
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 148b368..cb8ea11 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -13,16 +13,25 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
+
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
class BaseListProjectsTestJSON(base.BaseIdentityV3AdminTest):
def _list_projects_with_params(self, included, excluded, params, key):
# Validate that projects in ``included`` belong to the projects
# returned that match ``params`` but not projects in ``excluded``
+ all_projects = self.projects_client.list_projects()['projects']
+ LOG.debug("Complete list of projects available in keystone: %s",
+ all_projects)
body = self.projects_client.list_projects(params)['projects']
for p in included:
self.assertIn(p[key], map(lambda x: x[key], body))
@@ -35,46 +44,24 @@
@classmethod
def resource_setup(cls):
super(ListProjectsTestJSON, cls).resource_setup()
- cls.project_ids = list()
- # Create a domain
- cls.domain = cls.create_domain()
+ domain_id = cls.os_admin.credentials.domain_id
# Create project with domain
- cls.p1_name = data_utils.rand_name('project')
+ p1_name = data_utils.rand_name(cls.__name__)
cls.p1 = cls.projects_client.create_project(
- cls.p1_name, enabled=False,
- domain_id=cls.domain['id'])['project']
+ p1_name, enabled=False, domain_id=domain_id)['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p1['id'])
- cls.project_ids.append(cls.p1['id'])
# Create default project
- p2_name = data_utils.rand_name('project')
+ p2_name = data_utils.rand_name(cls.__name__)
cls.p2 = cls.projects_client.create_project(p2_name)['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p2['id'])
- cls.project_ids.append(cls.p2['id'])
# Create a new project (p3) using p2 as parent project
- p3_name = data_utils.rand_name('project')
+ p3_name = data_utils.rand_name(cls.__name__)
cls.p3 = cls.projects_client.create_project(
p3_name, parent_id=cls.p2['id'])['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p3['id'])
- cls.project_ids.append(cls.p3['id'])
-
- @decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
- def test_list_projects(self):
- # List projects
- list_projects = self.projects_client.list_projects()['projects']
-
- for p in self.project_ids:
- show_project = self.projects_client.show_project(p)['project']
- self.assertIn(show_project, list_projects)
-
- @decorators.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
- def test_list_projects_with_domains(self):
- # List projects with domain
- self._list_projects_with_params(
- [self.p1], [self.p2, self.p3], {'domain_id': self.domain['id']},
- 'domain_id')
@decorators.idempotent_id('0fe7a334-675a-4509-b00e-1c4b95d5dae8')
def test_list_projects_with_enabled(self):
@@ -82,12 +69,6 @@
self._list_projects_with_params(
[self.p1], [self.p2, self.p3], {'enabled': False}, 'enabled')
- @decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
- def test_list_projects_with_name(self):
- # List projects with name
- self._list_projects_with_params(
- [self.p1], [self.p2, self.p3], {'name': self.p1_name}, 'name')
-
@decorators.idempotent_id('6edc66f5-2941-4a17-9526-4073311c1fac')
def test_list_projects_with_parent(self):
# List projects with parent
@@ -97,3 +78,54 @@
self.assertNotEmpty(fetched_projects)
for project in fetched_projects:
self.assertEqual(self.p3['parent_id'], project['parent_id'])
+
+
+class ListProjectsStaticTestJSON(BaseListProjectsTestJSON):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
+
+ @classmethod
+ def resource_setup(cls):
+ super(ListProjectsStaticTestJSON, cls).resource_setup()
+ # Fetch an existing project from os_primary
+ cls.p1 = cls.projects_client.show_project(
+ cls.os_primary.credentials.project_id)['project']
+ # Create a test project
+ p2_name = data_utils.rand_name(cls.__name__)
+ p2_domain_id = CONF.identity.default_domain_id
+ cls.p2 = cls.projects_client.create_project(
+ p2_name, domain_id=p2_domain_id)['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p2['id'])
+
+ @decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
+ def test_list_projects(self):
+ # List projects
+ list_projects = self.projects_client.list_projects()['projects']
+
+ for p in [self.p1, self.p2]:
+ show_project = self.projects_client.show_project(p['id'])[
+ 'project']
+ self.assertIn(show_project, list_projects)
+
+ @decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
+ def test_list_projects_with_name(self):
+ # List projects with name
+ self._list_projects_with_params(
+ [self.p1], [self.p2], {'name': self.p1['name']}, 'name')
+
+ @decorators.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
+ def test_list_projects_with_domains(self):
+ # Verify project list filtered by domain
+ key = 'domain_id'
+ for p in [self.p1, self.p2]:
+ params = {key: p[key]}
+ # Verify filter shows both projects in their respective domains
+ self._list_projects_with_params([p], [], params, key)
+ # Verify filter excludes projects that are filtered out
+ if self.p1[key] != self.p2[key]:
+ exclude = [self.p2]
+ params = {key: self.p1[key]}
+ self._list_projects_with_params([self.p1], exclude, params, key)
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index c69e4c8..7bd0bcf 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -22,6 +22,7 @@
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ """Test listing keystone users"""
def _list_users_with_params(self, params, key, expected, not_expected):
# Helper method to list users filtered with params and
@@ -34,6 +35,14 @@
map(lambda x: x[key], body))
@classmethod
+ def skip_checks(cls):
+ super(UsersV3TestJSON, cls).skip_checks()
+ if CONF.identity_feature_enabled.immutable_user_source:
+ raise cls.skipException('Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
+
+ @classmethod
def resource_setup(cls):
super(UsersV3TestJSON, cls).resource_setup()
alt_user = data_utils.rand_name('test_user')
@@ -61,7 +70,7 @@
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
- # List users with domain
+ """List users with domain"""
params = {'domain_id': self.domain['id']}
self._list_users_with_params(params, 'domain_id',
self.domain_enabled_user,
@@ -69,7 +78,7 @@
@decorators.idempotent_id('bff8bf2f-9408-4ef5-b63a-753c8c2124eb')
def test_list_users_with_not_enabled(self):
- # List the users with not enabled
+ """List the users with not enabled"""
params = {'enabled': False}
self._list_users_with_params(params, 'enabled',
self.non_domain_enabled_user,
@@ -77,7 +86,7 @@
@decorators.idempotent_id('c285bb37-7325-4c02-bff3-3da5d946d683')
def test_list_users_with_name(self):
- # List users with name
+ """List users with name"""
params = {'name': self.domain_enabled_user['name']}
# When domain specific drivers are enabled the operations
# of listing all users and listing all groups are not supported,
@@ -90,7 +99,7 @@
@decorators.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
def test_list_users(self):
- # List users
+ """List users"""
# When domain specific drivers are enabled the operations
# of listing all users and listing all groups are not supported,
# they need a domain filter to be specified
@@ -112,7 +121,7 @@
@decorators.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
def test_get_user(self):
- # Get a user detail
+ """Get a user detail"""
user = self.users_client.show_user(self.users[0]['id'])['user']
self.assertEqual(self.users[0]['id'], user['id'])
self.assertEqual(self.users[0]['name'], user['name'])
diff --git a/tempest/api/identity/admin/v3/test_oauth_consumers.py b/tempest/api/identity/admin/v3/test_oauth_consumers.py
index 062cce5..7a85f84 100644
--- a/tempest/api/identity/admin/v3/test_oauth_consumers.py
+++ b/tempest/api/identity/admin/v3/test_oauth_consumers.py
@@ -21,6 +21,10 @@
class OAUTHConsumersV3Test(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
def _create_consumer(self):
"""Creates a consumer with a random description."""
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 2908fc4..fb81d0a 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -19,13 +19,14 @@
class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone policies"""
def _delete_policy(self, policy_id):
self.policies_client.delete_policy(policy_id)
@decorators.idempotent_id('1a0ad286-2d06-4123-ab0d-728893a76201')
def test_list_policies(self):
- # Test to list policies
+ """Test to list keystone policies"""
policy_ids = list()
fetched_ids = list()
for _ in range(3):
@@ -46,7 +47,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('e544703a-2f03-4cf2-9b0f-350782fdb0d3')
def test_create_update_delete_policy(self):
- # Test to update policy
+ """Test to update keystone policy"""
blob = data_utils.rand_name('BlobName')
policy_type = data_utils.rand_name('PolicyType')
policy = self.policies_client.create_policy(blob=blob,
diff --git a/tempest/api/identity/admin/v3/test_project_tags.py b/tempest/api/identity/admin/v3/test_project_tags.py
index d05173b..eed60af 100644
--- a/tempest/api/identity/admin/v3/test_project_tags.py
+++ b/tempest/api/identity/admin/v3/test_project_tags.py
@@ -25,11 +25,18 @@
class IdentityV3ProjectTagsTest(base.BaseIdentityV3AdminTest):
+ """Test keystone project tags"""
+
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.idempotent_id('7c123aac-999d-416a-a0fb-84b915ab10de')
@testtools.skipUnless(CONF.identity_feature_enabled.project_tags,
'Project tags not available.')
def test_list_update_delete_project_tags(self):
+ """Test listing, updating and deleting of project tags"""
project = self.setup_test_project()
# Create a tag for testing.
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 6ddf42e..e46145d 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -12,13 +12,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
def test_project_create_with_description(self):
@@ -31,7 +39,7 @@
'been sent in response for create')
body = self.projects_client.show_project(project_id)['project']
desc2 = body['description']
- self.assertEqual(desc2, project_desc, 'Description does not appear'
+ self.assertEqual(desc2, project_desc, 'Description does not appear '
'to be set')
@decorators.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
@@ -176,6 +184,10 @@
self.assertEqual(resp2_en, resp3_en)
@decorators.idempotent_id('59398d4a-5dc5-4f86-9a4c-c26cc804d6c6')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_associate_user_to_project(self):
# Associate a user to a project
# Create a Project
@@ -218,8 +230,14 @@
_projects = self.projects_client.list_projects()['projects']
project_list = next(x for x in _projects if x['id'] == project['id'])
- # Assert the list of fields is correct (one is enough to check here)
- self.assertSetEqual(set(fields), set(project_get.keys()))
+ # Assert the expected fields exist. More fields than expected may
+ # be in this list. This is for future proofing as keystone does not
+ # and has no plans to support microversions. Any fields in the future
+ # that are added to the response of the API should eventually be added
+ # to the expected fields. The expected fields must be a subset of
+ # the project_get fields (all keys in fields must exist in project_get,
+ # but project_get.keys() may have additional fields)
+ self.assertTrue(set(fields).issubset(project_get.keys()))
# Ensure the set of tags is identical and match the expected one
get_tags = set(project_get.pop("tags"))
diff --git a/tempest/api/identity/admin/v3/test_projects_negative.py b/tempest/api/identity/admin/v3/test_projects_negative.py
index 33a9c8c..12f1d4a 100644
--- a/tempest/api/identity/admin/v3/test_projects_negative.py
+++ b/tempest/api/identity/admin/v3/test_projects_negative.py
@@ -22,6 +22,22 @@
class ProjectsNegativeTestJSON(base.BaseIdentityV3AdminTest):
@decorators.attr(type=['negative'])
+ @decorators.idempotent_id('8d68c012-89e0-4394-8d6b-ccd7196def97')
+ def test_project_delete_by_unauthorized_user(self):
+ # Non-admin user should not be able to delete a project
+ project = self.setup_test_project()
+ self.assertRaises(
+ lib_exc.Forbidden, self.non_admin_projects_client.delete_project,
+ project['id'])
+
+
+class ProjectsNegativeStaticTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
+
+ @decorators.attr(type=['negative'])
@decorators.idempotent_id('24c49279-45dd-4155-887a-cb738c2385aa')
def test_list_projects_by_unauthorized_user(self):
# Non-admin user should not be able to list projects
@@ -63,15 +79,6 @@
self.projects_client.create_project, project_name)
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('8d68c012-89e0-4394-8d6b-ccd7196def97')
- def test_project_delete_by_unauthorized_user(self):
- # Non-admin user should not be able to delete a project
- project = self.setup_test_project()
- self.assertRaises(
- lib_exc.Forbidden, self.non_admin_projects_client.delete_project,
- project['id'])
-
- @decorators.attr(type=['negative'])
@decorators.idempotent_id('7965b581-60c1-43b7-8169-95d4ab7fc6fb')
def test_delete_non_existent_project(self):
# Attempt to delete a non existent project should fail
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index f22a528..c8c0151 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -20,6 +20,10 @@
class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 47f663c..5ba4c9f 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -12,15 +12,23 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
+CONF = config.CONF
+
class RolesV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -48,16 +56,21 @@
domain_id=cls.domain['id'])['group']
cls.addClassResourceCleanup(cls.groups_client.delete_group,
cls.group_body['id'])
- cls.user_body = cls.users_client.create_user(
- name=u_name, description=u_desc, password=cls.u_password,
- email=u_email, project_id=cls.project['id'],
- domain_id=cls.domain['id'])['user']
- cls.addClassResourceCleanup(cls.users_client.delete_user,
- cls.user_body['id'])
cls.role = cls.roles_client.create_role(
name=data_utils.rand_name('Role'))['role']
cls.addClassResourceCleanup(cls.roles_client.delete_role,
cls.role['id'])
+ if not CONF.identity_feature_enabled.immutable_user_source:
+ cls.user_body = cls.users_client.create_user(
+ name=u_name,
+ description=u_desc,
+ email=u_email,
+ password=cls.u_password,
+ domain_id=cls.domain['id'],
+ project_id=cls.project['id']
+ )['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user_body['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
@@ -84,6 +97,9 @@
self.assertIn(role['id'], [r['id'] for r in roles])
@decorators.idempotent_id('c6b80012-fe4a-498b-9ce8-eb391c05169f')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_user_on_project(self):
self.roles_client.create_user_role_on_project(self.project['id'],
self.user_body['id'],
@@ -102,6 +118,9 @@
self.project['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('6c9a2940-3625-43a3-ac02-5dcec62ef3bd')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_user_on_domain(self):
self.roles_client.create_user_role_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
@@ -119,6 +138,9 @@
self.domain['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_group_on_project(self):
# Grant role to group on project
self.roles_client.create_group_role_on_project(
@@ -254,6 +276,9 @@
self.assertIn(self.roles[2]['id'], implies_ids)
@decorators.idempotent_id('c8828027-df48-4021-95df-b65b92c7429e')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_assignments_for_implied_roles_create_delete(self):
# Create a grant using "roles[0]"
self.roles_client.create_user_role_on_project(
@@ -344,6 +369,9 @@
domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_assignments_for_domain_roles(self):
domain_role = self.setup_test_role(domain_id=self.domain['id'])
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index 5afeb98..a649d27 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -20,6 +20,7 @@
class ServicesTestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone services"""
def _del_service(self, service_id):
# Used for deleting the services created in this class
@@ -31,6 +32,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('5193aad5-bcb7-411d-85b0-b3b61b96ef06')
def test_create_update_get_service(self):
+ """Test creating, updating and getting of keystone service"""
# Creating a Service
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
@@ -63,7 +65,7 @@
@decorators.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
def test_create_service_without_description(self):
- # Create a service only with name and type
+ """Create a keystone service only with name and type"""
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
service = self.services_client.create_service(
@@ -74,7 +76,7 @@
@decorators.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
def test_list_services(self):
- # Create, List, Verify and Delete Services
+ """Create, List, Verify and Delete Keystone Services"""
service_ids = list()
service_types = list()
for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 8ae43d6..5f1b58d 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -42,10 +42,10 @@
user = self.create_test_user(password=user_password)
# Create a couple projects
- project1_name = data_utils.rand_name(name='project')
+ project1_name = data_utils.rand_name(name=self.__class__.__name__)
project1 = self.setup_test_project(name=project1_name)
- project2_name = data_utils.rand_name(name='project')
+ project2_name = data_utils.rand_name(name=self.__class__.__name__)
project2 = self.setup_test_project(name=project2_name)
self.addCleanup(self.projects_client.delete_project, project2['id'])
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 2530072..78e3cce 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -33,13 +33,16 @@
super(TrustsV3TestJSON, cls).skip_checks()
if not CONF.identity_feature_enabled.trust:
raise cls.skipException("Trusts aren't enabled")
+ if CONF.identity_feature_enabled.immutable_user_source:
+ raise cls.skipException('Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def setUp(self):
super(TrustsV3TestJSON, self).setUp()
# Use alt_username as the trustee
self.trust_id = None
self.create_trustor_and_roles()
- self.addCleanup(self.cleanup_user_and_roles)
def tearDown(self):
if self.trust_id:
@@ -50,11 +53,13 @@
def create_trustor_and_roles(self):
# create a project that trusts will be granted on
- trustor_project_name = data_utils.rand_name(name='project')
+ trustor_project_name = data_utils.rand_name(
+ name=self.__class__.__name__)
project = self.projects_client.create_project(
trustor_project_name,
domain_id=CONF.identity.default_domain_id)['project']
self.trustor_project_id = project['id']
+ self.addCleanup(self.projects_client.delete_project, project['id'])
self.assertIsNotNone(self.trustor_project_id)
# Create a trustor User
@@ -69,6 +74,7 @@
email=u_email,
project_id=self.trustor_project_id,
domain_id=CONF.identity.default_domain_id)['user']
+ self.addCleanup(self.users_client.delete_user, user['id'])
self.trustor_user_id = user['id']
# And two roles, one we'll delegate and one we won't
@@ -76,10 +82,12 @@
self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
role = self.roles_client.create_role(name=self.delegated_role)['role']
+ self.addCleanup(self.roles_client.delete_role, role['id'])
self.delegated_role_id = role['id']
role = self.roles_client.create_role(
name=self.not_delegated_role)['role']
+ self.addCleanup(self.roles_client.delete_role, role['id'])
self.not_delegated_role_id = role['id']
# Assign roles to trustor
@@ -109,16 +117,6 @@
os = clients.Manager(credentials=creds)
self.trustor_client = os.trusts_client
- def cleanup_user_and_roles(self):
- if self.trustor_user_id:
- self.users_client.delete_user(self.trustor_user_id)
- if self.trustor_project_id:
- self.projects_client.delete_project(self.trustor_project_id)
- if self.delegated_role_id:
- self.roles_client.delete_role(self.delegated_role_id)
- if self.not_delegated_role_id:
- self.roles_client.delete_role(self.not_delegated_role_id)
-
def create_trust(self, impersonate=True, expires=None):
trust_create = self.trustor_client.create_trust(
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 3813568..31cbbac 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -27,10 +27,19 @@
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone users"""
+
+ @classmethod
+ def skip_checks(cls):
+ super(UsersV3TestJSON, cls).skip_checks()
+ if CONF.identity_feature_enabled.immutable_user_source:
+ raise cls.skipException('Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
@decorators.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
def test_user_update(self):
- # Test case to check if updating of user attributes is successful.
+ """Test case to check if updating of user attributes is successful"""
# Creating first user
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
@@ -64,6 +73,7 @@
@decorators.idempotent_id('2d223a0e-e457-4a70-9fb1-febe027a0ff9')
def test_update_user_password(self):
+ """Test updating user password"""
# Creating User to check password updation
u_name = data_utils.rand_name('user')
original_password = data_utils.rand_password()
@@ -90,7 +100,7 @@
@decorators.idempotent_id('a831e70c-e35b-430b-92ed-81ebbc5437b8')
def test_list_user_projects(self):
- # List the projects that a user has access upon
+ """Test listing the projects that a user has access upon"""
assigned_project_ids = list()
fetched_project_ids = list()
u_project = self.setup_test_project()
@@ -133,7 +143,7 @@
@decorators.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
def test_get_user(self):
- # Get a user detail
+ """Test getting a user detail"""
user = self.setup_test_user()
fetched_user = self.users_client.show_user(user['id'])['user']
self.assertEqual(user['id'], fetched_user['id'])
@@ -142,6 +152,7 @@
'Security compliance not available.')
@decorators.idempotent_id('568cd46c-ee6c-4ab4-a33a-d3791931979e')
def test_password_history_not_enforced_in_admin_reset(self):
+ """Test setting same password when password history is not enforced"""
old_password = self.os_primary.credentials.password
user_id = self.os_primary.credentials.user_id
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 237e728..9981ef8 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -57,18 +57,19 @@
self.creds.user_id,
tenant_id=self.creds.tenant_id)["credential"]
created_creds.append(creds1['access'])
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, creds1['access'])
+
# create second ec2 credentials
creds2 = self.non_admin_users_client.create_user_ec2_credential(
self.creds.user_id,
tenant_id=self.creds.tenant_id)["credential"]
created_creds.append(creds2['access'])
- # add credentials to be cleaned up
- self.addCleanup(
- self.non_admin_users_client.delete_user_ec2_credential,
- self.creds.user_id, creds1['access'])
self.addCleanup(
self.non_admin_users_client.delete_user_ec2_credential,
self.creds.user_id, creds2['access'])
+
# get the list of user ec2 credentials
resp = self.non_admin_users_client.list_user_ec2_credentials(
self.creds.user_id)["credentials"]
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 158dfb3..2eea860 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -15,6 +15,8 @@
import time
+import testtools
+
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
@@ -78,6 +80,10 @@
self.non_admin_users_client.auth_provider.set_auth()
@decorators.idempotent_id('165859c9-277f-4124-9479-a7d1627b0ca7')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_user_update_own_password(self):
old_pass = self.creds.password
old_token = self.non_admin_users_client.token
diff --git a/tempest/api/identity/v3/test_api_discovery.py b/tempest/api/identity/v3/test_api_discovery.py
index c04c21b..e87d1cd 100644
--- a/tempest/api/identity/v3/test_api_discovery.py
+++ b/tempest/api/identity/v3/test_api_discovery.py
@@ -14,12 +14,24 @@
# under the License.
from tempest.api.identity import base
+from tempest import config
from tempest.lib import decorators
+CONF = config.CONF
+
+
class TestApiDiscovery(base.BaseIdentityV3Test):
"""Tests for API discovery features."""
+ @decorators.idempotent_id('79aec9ae-710f-4c54-a4fc-3aa25b4feac3')
+ def test_identity_v3_existence(self):
+ versions = self.non_admin_versions_client.list_versions()
+ found = any(
+ "v3" in version.get('id')
+ for version in versions['versions']['values'])
+ self.assertEqual(CONF.identity_feature_enabled.api_v3, found)
+
@decorators.idempotent_id('721f480f-35b6-46c7-846e-047e6acea0dc')
@decorators.attr(type='smoke')
def test_list_api_versions(self):
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
index deec2dc..ce6adf9 100644
--- a/tempest/api/identity/v3/test_catalog.py
+++ b/tempest/api/identity/v3/test_catalog.py
@@ -19,11 +19,13 @@
class IdentityCatalogTest(base.BaseIdentityV3Test):
+ """Test service's catalog type values"""
@decorators.idempotent_id('56b57ced-22b8-4127-9b8a-565dfb0207e2')
def test_catalog_standardization(self):
- # http://git.openstack.org/cgit/openstack/service-types-authority
- # /tree/service-types.yaml
+ """Test that every service has a standard catalog type value"""
+ # https://opendev.org/openstack/service-types-authority
+ # /src/branch/master/service-types.yaml
standard_service_values = [{'name': 'keystone', 'type': 'identity'},
{'name': 'nova', 'type': 'compute'},
{'name': 'glance', 'type': 'image'},
@@ -31,11 +33,9 @@
# next, we need to GET the catalog using the catalog client
catalog = self.non_admin_catalog_client.show_catalog()['catalog']
# get list of the service types present in the catalog
- catalog_services = []
- for service in catalog:
- catalog_services.append(service['type'])
+ catalog_services = [service['type'] for service in catalog]
for service in standard_service_values:
- # if service enabled, check if it has a standard typevalue
+ # if service enabled, check if it has a standard type value
if service['name'] == 'keystone' or\
getattr(CONF.service_available, service['name']):
self.assertIn(service['type'], catalog_services)
diff --git a/tempest/api/identity/v3/test_domains.py b/tempest/api/identity/v3/test_domains.py
new file mode 100644
index 0000000..9f132dd
--- /dev/null
+++ b/tempest/api/identity/v3/test_domains.py
@@ -0,0 +1,39 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class DefaultDomainTestJSON(base.BaseIdentityV3Test):
+
+ @classmethod
+ def setup_clients(cls):
+ super(DefaultDomainTestJSON, cls).setup_clients()
+ cls.domains_client = cls.os_primary.domains_client
+
+ @classmethod
+ def resource_setup(cls):
+ super(DefaultDomainTestJSON, cls).resource_setup()
+ cls.domain_id = CONF.identity.default_domain_id
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
+ def test_default_domain_exists(self):
+ domain = self.domains_client.show_domain(self.domain_id)['domain']
+ self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/v3/test_tokens.py b/tempest/api/identity/v3/test_tokens.py
index f13aa10..fa1c47f 100644
--- a/tempest/api/identity/v3/test_tokens.py
+++ b/tempest/api/identity/v3/test_tokens.py
@@ -43,7 +43,15 @@
self.assertEqual(authenticated_token, token_body)
# test to see if token has been properly authenticated
self.assertEqual(authenticated_token['user']['id'], user_id)
- self.assertEqual(authenticated_token['user']['name'], username)
+ # NOTE: whether resource names are case-sensitive in keystone
+ # depends on the backend; backends such as MySQL or LDAP are
+ # case-insensitive but case-preserving. A resource name is
+ # returned as it is stored in the backend, not as it was
+ # requested. Compare the username in lower-case on both sides
+ # to avoid failures on different backends.
+ self.assertEqual(
+ authenticated_token['user']['name'].lower(), username.lower())
+
self.non_admin_client.delete_token(subject_token)
self.assertRaises(
lib_exc.NotFound, self.non_admin_client.show_token, subject_token)
@@ -84,10 +92,17 @@
self.assertIsNotNone(subject_id, 'Expected user ID in token.')
subject_name = resp['user']['name']
+
if username:
- self.assertEqual(subject_name, username)
+ # NOTE: whether resource names are case-sensitive in keystone
+ # depends on the backend; backends such as MySQL or LDAP are
+ # case-insensitive but case-preserving. A resource name is
+ # returned as it is stored in the backend, not as it was
+ # requested. Compare the username in lower-case on both sides
+ # to avoid failures on different backends.
+ self.assertEqual(subject_name.lower(), username.lower())
else:
- # Expect a user name, but don't know what it will be.
+ # Expect a user name, but don't know what it will be
self.assertIsNotNone(subject_name, 'Expected user name in token.')
self.assertEqual(resp['methods'][0], 'password')
@@ -110,7 +125,15 @@
subject_token)['token']
self.assertEqual(resp['x-subject-token'], subject_token)
self.assertEqual(token_details['user']['id'], user.user_id)
- self.assertEqual(token_details['user']['name'], user.username)
+ # NOTE: whether resource names are case-sensitive in keystone
+ # depends on the backend; backends such as MySQL or LDAP are
+ # case-insensitive but case-preserving. A resource name is
+ # returned as it is stored in the backend, not as it was
+ # requested. Compare the username in lower-case on both sides
+ # to avoid failures on different backends.
+ self.assertEqual(
+ token_details['user']['name'].lower(),
+ user.username.lower())
# Perform Delete Token
self.non_admin_client.delete_token(subject_token)
self.assertRaises(lib_exc.NotFound,
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 6d6baca..d4e7612 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -77,6 +77,10 @@
self.non_admin_users_client.auth_provider.set_auth()
@decorators.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_user_update_own_password(self):
old_pass = self.creds.password
old_token = self.non_admin_client.token
@@ -102,6 +106,10 @@
@testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
'Security compliance not available.')
@decorators.idempotent_id('941784ee-5342-4571-959b-b80dd2cea516')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_password_history_check_self_service_api(self):
old_pass = self.creds.password
new_pass1 = data_utils.rand_password()
@@ -133,6 +141,13 @@
'Security compliance not available.')
@decorators.idempotent_id('a7ad8bbf-2cff-4520-8c1d-96332e151658')
def test_user_account_lockout(self):
+ if (CONF.identity.user_lockout_failure_attempts <= 0 or
+ CONF.identity.user_lockout_duration <= 0):
+ raise self.skipException(
+ "Both CONF.identity.user_lockout_failure_attempts and "
+ "CONF.identity.user_lockout_duration should be greater than "
+ "zero to test this feature")
+
password = self.creds.password
# First, we login using the correct credentials
diff --git a/tempest/api/image/v1/test_image_members.py b/tempest/api/image/v1/test_image_members.py
index bf2e510..5e2c8af 100644
--- a/tempest/api/image/v1/test_image_members.py
+++ b/tempest/api/image/v1/test_image_members.py
@@ -19,9 +19,11 @@
class ImageMembersTest(base.BaseV1ImageMembersTest):
+ """Test image members"""
@decorators.idempotent_id('1d6ef640-3a20-4c84-8710-d95828fdb6ad')
def test_add_image_member(self):
+ """Test adding member for image"""
image = self._create_image()
self.image_member_client.create_image_member(image, self.alt_tenant_id)
body = self.image_member_client.list_image_members(image)
@@ -33,6 +35,7 @@
@decorators.idempotent_id('6a5328a5-80e8-4b82-bd32-6c061f128da9')
def test_get_shared_images(self):
+ """Test getting shared images"""
image = self._create_image()
self.image_member_client.create_image_member(image, self.alt_tenant_id)
share_image = self._create_image()
@@ -47,6 +50,7 @@
@decorators.idempotent_id('a76a3191-8948-4b44-a9d6-4053e5f2b138')
def test_remove_member(self):
+ """Test removing member from image"""
image_id = self._create_image()
self.image_member_client.create_image_member(image_id,
self.alt_tenant_id)
diff --git a/tempest/api/image/v1/test_image_members_negative.py b/tempest/api/image/v1/test_image_members_negative.py
index 2748bd5..4e3c27c 100644
--- a/tempest/api/image/v1/test_image_members_negative.py
+++ b/tempest/api/image/v1/test_image_members_negative.py
@@ -19,11 +19,12 @@
class ImageMembersNegativeTest(base.BaseV1ImageMembersTest):
+ """Negative tests of image members"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('147a9536-18e3-45da-91ea-b037a028f364')
def test_add_member_with_non_existing_image(self):
- # Add member with non existing image.
+ """Add member with non existing image"""
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.image_member_client.create_image_member,
@@ -32,7 +33,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e1559f05-b667-4f1b-a7af-518b52dc0c0f')
def test_delete_member_with_non_existing_image(self):
- # Delete member with non existing image.
+ """Delete member with non existing image"""
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.image_member_client.delete_image_member,
@@ -41,7 +42,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f5720333-dd69-4194-bb76-d2f048addd56')
def test_delete_member_with_non_existing_tenant(self):
- # Delete member with non existing tenant.
+ """Delete member from image with non existing tenant"""
image_id = self._create_image()
non_exist_tenant = data_utils.rand_uuid_hex()
self.assertRaises(lib_exc.NotFound,
@@ -51,7 +52,10 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f25f89e4-0b6c-453b-a853-1f80b9d7ef26')
def test_get_image_without_membership(self):
- # Image is hidden from another tenants.
+ """Get image without membership
+
+ Image is hidden from other tenants.
+ """
image_id = self._create_image()
self.assertRaises(lib_exc.NotFound,
self.alt_img_cli.show_image,
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 2432c8b..595717e 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -57,7 +57,7 @@
@decorators.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
def test_register_then_upload(self):
- # Register, then upload an image
+ """Register, then upload an image"""
properties = {'prop1': 'val1'}
container_format, disk_format = get_container_and_disk_format()
image = self.create_image(name='New Name',
@@ -79,7 +79,7 @@
@decorators.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
def test_register_remote_image(self):
- # Register a new remote image
+ """Register a new remote image"""
container_format, disk_format = get_container_and_disk_format()
body = self.create_image(name='New Remote Image',
container_format=container_format,
@@ -96,6 +96,7 @@
@decorators.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
def test_register_http_image(self):
+ """Register a new image from an http image path url"""
container_format, disk_format = get_container_and_disk_format()
image = self.create_image(name='New Http Image',
container_format=container_format,
@@ -108,7 +109,7 @@
@decorators.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
def test_register_image_with_min_ram(self):
- # Register an image with min ram
+ """Register an image with min ram"""
container_format, disk_format = get_container_and_disk_format()
properties = {'prop1': 'val1'}
body = self.create_image(name='New_image_with_min_ram',
@@ -213,7 +214,7 @@
@decorators.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
def test_index_no_params(self):
- # Simple test to see all fixture images returned
+ """Simple test to see all fixture images returned"""
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
for image_id in self.created_images:
@@ -221,6 +222,7 @@
@decorators.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
def test_index_disk_format(self):
+ """Test listing images by disk format"""
images_list = self.client.list_images(
disk_format=self.disk_format_alt)['images']
for image in images_list:
@@ -232,6 +234,7 @@
@decorators.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
def test_index_container_format(self):
+ """Test listing images by container format"""
images_list = self.client.list_images(
container_format=self.container_format)['images']
for image in images_list:
@@ -243,6 +246,7 @@
@decorators.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
def test_index_max_size(self):
+ """Test listing images by max size"""
images_list = self.client.list_images(size_max=42)['images']
for image in images_list:
self.assertLessEqual(image['size'], 42)
@@ -252,6 +256,7 @@
@decorators.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
def test_index_min_size(self):
+ """Test listing images by min size"""
images_list = self.client.list_images(size_min=142)['images']
for image in images_list:
self.assertGreaterEqual(image['size'], 142)
@@ -261,6 +266,7 @@
@decorators.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
def test_index_status_active_detail(self):
+ """Test listing active images sorting by size in descending order"""
images_list = self.client.list_images(detail=True,
status='active',
sort_key='size',
@@ -274,6 +280,7 @@
@decorators.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
def test_index_name(self):
+ """Test listing images by its name"""
images_list = self.client.list_images(
detail=True,
name='New Remote Image dup')['images']
@@ -285,6 +292,8 @@
class UpdateImageMetaTest(base.BaseV1ImageTest):
+ """Test image metadata"""
+
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
@@ -308,6 +317,7 @@
@decorators.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
def test_list_image_metadata(self):
+ """Test listing image metadata"""
# All metadata key/value pairs for an image should be returned
resp = self.client.check_image(self.image_id)
resp_metadata = common_image.get_image_meta_from_headers(resp)
@@ -316,6 +326,7 @@
@decorators.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
def test_update_image_metadata(self):
+ """Test updating image metadata"""
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
resp = self.client.check_image(self.image_id)
diff --git a/tempest/api/image/v1/test_images_negative.py b/tempest/api/image/v1/test_images_negative.py
index 690b8da..2af1288 100644
--- a/tempest/api/image/v1/test_images_negative.py
+++ b/tempest/api/image/v1/test_images_negative.py
@@ -26,7 +26,10 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('036ede36-6160-4463-8c01-c781eee6369d')
def test_register_with_invalid_container_format(self):
- # Negative tests for invalid data supplied to POST /images
+ """Create image with invalid container format
+
+ Negative tests for invalid data supplied to POST /images
+ """
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
headers={'x-image-meta-name': 'test',
'x-image-meta-container_format': 'wrong',
@@ -35,6 +38,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('993face5-921d-4e84-aabf-c1bba4234a67')
def test_register_with_invalid_disk_format(self):
+ """Create image with invalid disk format"""
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
headers={'x-image-meta-name': 'test',
'x-image-meta-container_format': 'bare',
@@ -43,7 +47,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ec652588-7e3c-4b67-a2f2-0fa96f57c8fc')
def test_delete_non_existent_image(self):
- # Return an error while trying to delete a non-existent image
+ """Return an error while trying to delete a non-existent image"""
non_existent_image_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
@@ -52,13 +56,13 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('04f72aa3-fcec-45a3-81a3-308ef7cc82bc')
def test_delete_image_blank_id(self):
- # Return an error while trying to delete an image with blank Id
+ """Return an error while trying to delete an image with blank Id"""
self.assertRaises(lib_exc.NotFound, self.client.delete_image, '')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('950e5054-a3c7-4dee-ada5-e576f1087abd')
def test_delete_image_non_hex_string_id(self):
- # Return an error while trying to delete an image with non hex id
+ """Return an error while trying to delete an image with non hex id"""
invalid_image_id = data_utils.rand_uuid()[:-1] + "j"
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
invalid_image_id)
@@ -66,13 +70,13 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('4ed757cd-450c-44b1-9fd1-c819748c650d')
def test_delete_image_negative_image_id(self):
- # Return an error while trying to delete an image with negative id
+ """Return an error while trying to delete an image with negative id"""
self.assertRaises(lib_exc.NotFound, self.client.delete_image, -1)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a4a448ab-3db2-4d2d-b9b2-6a1271241dfe')
def test_delete_image_id_over_character_limit(self):
- # Return an error while trying to delete image with id over limit
+ """Return an error while trying to delete image with id over limit"""
overlimit_image_id = data_utils.rand_uuid() + "1"
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
overlimit_image_id)
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index dbb8c58..7e13d7f 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -19,10 +19,12 @@
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
+ """"Test image operations about image owner"""
@decorators.related_bug('1420008')
@decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
def test_create_image_owner_param(self):
+ """Test creating image with specified owner"""
# NOTE: Create image with owner different from tenant owner by
# using "owner" parameter requires an admin privileges.
random_id = data_utils.rand_uuid_hex()
@@ -35,6 +37,7 @@
@decorators.related_bug('1420008')
@decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
def test_update_image_owner_param(self):
+ """Test updating image owner"""
random_id_1 = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id_1)
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index aa57daf..c4a3e0e 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -57,6 +57,13 @@
self.assertIn('status', image)
self.assertEqual('queued', image['status'])
+ # NOTE: This Glance API returns different status codes depending
+ # on the image condition. In this empty-data case, Glance should
+ # return 204, so check the status code here.
+ image_file = self.client.show_image_file(image['id'])
+ self.assertEqual(0, len(image_file.data))
+ self.assertEqual(204, image_file.response.status)
+
# Now try uploading an image file
file_content = data_utils.random_bytes()
image_file = six.BytesIO(file_content)
@@ -81,8 +88,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
def test_delete_image(self):
- # Deletes an image by image_id
-
+ """Test deleting an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
@@ -103,8 +109,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
def test_update_image(self):
- # Updates an image by image_id
-
+ """Test updating an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
@@ -115,17 +120,6 @@
visibility='private')
self.assertEqual('queued', image['status'])
- # NOTE: This Glance API returns different status codes for image
- # condition. In this empty data case, Glance should return 204,
- # so here should check the status code.
- image_file = self.client.show_image_file(image['id'])
- self.assertEqual(0, len(image_file.data))
- self.assertEqual(204, image_file.response.status)
-
- # Now try uploading an image file
- image_file = six.BytesIO(data_utils.random_bytes())
- self.client.store_image_file(image['id'], image_file)
-
# Update Image
new_image_name = data_utils.rand_name('new-image')
self.client.update_image(image['id'], [
@@ -139,6 +133,7 @@
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
+ """Test deactivating and reactivating an image"""
# Create image
image_name = data_utils.rand_name('image')
image = self.create_image(name=image_name,
@@ -185,7 +180,7 @@
for disk_fmt in disk_fmts]
for (container_fmt, disk_fmt) in all_pairs[:6]:
- LOG.debug("Creating an image"
+ LOG.debug("Creating an image "
"(Container format: %s, Disk format: %s).",
container_fmt, disk_fmt)
cls._create_standard_image(container_fmt, disk_fmt)
@@ -239,7 +234,7 @@
@decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_list_no_params(self):
- # Simple test to see all fixture images returned
+ """Simple test to see all fixture images returned"""
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
@@ -248,25 +243,25 @@
@decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
- # Test to get all images with a specific container_format
+ """Test to get all images with a specific container_format"""
params = {"container_format": self.test_data['container_format']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
- # Test to get all images with disk_format = raw
+ """Test to get all images with disk_format = raw"""
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
- # Test to get all images with visibility = private
+ """Test to get all images with visibility = private"""
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
- # Test to get all images by size
+ """Test to get all images by size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -276,7 +271,7 @@
@decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
- # Test to get all images with size between 2000 to 3000
+ """Test to get all images with min size and max size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -294,13 +289,13 @@
@decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
- # Test to get all active images
+ """Test to get all active images"""
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
- # Test to get images by limit
+ """Test to get images by limit"""
params = {"limit": 1}
images_list = self.client.list_images(params=params)['images']
@@ -309,7 +304,7 @@
@decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
def test_list_image_param_owner(self):
- # Test to get images by owner
+ """Test to get images by owner"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -319,13 +314,13 @@
@decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
def test_list_images_param_name(self):
- # Test to get images by name
+ """Test to get images by name"""
params = {'name': self.test_data['name']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
def test_list_images_param_tag(self):
- # Test to get images matching a tag
+ """Test to get images matching a tag"""
params = {'tag': self.test_data['tags'][0]}
images_list = self.client.list_images(params=params)['images']
# Validating properties of fetched images
@@ -340,24 +335,26 @@
@decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
def test_list_images_param_sort(self):
+ """Test listing images sorting in descending order"""
params = {'sort': 'size:desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
def test_list_images_param_sort_key_dir(self):
+ """Test listing images sorting by size in descending order"""
params = {'sort_key': 'size', 'sort_dir': 'desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
- # Test to get image schema
+ """Test to get image schema"""
schema = "image"
body = self.schemas_client.show_schema(schema)
self.assertEqual("image", body['name'])
@decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
- # Test to get images schema
+ """Test to get images schema"""
schema = "images"
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
@@ -376,6 +373,7 @@
@decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
def test_list_images_param_member_status(self):
+ """Test listing images by member_status and visibility"""
# Create an image to be shared using default visibility
image_file = six.BytesIO(data_utils.random_bytes(2048))
container_format = CONF.image.container_formats[0]
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index e19d8c8..bc67859 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -15,9 +15,11 @@
class ImagesMemberTest(base.BaseV2MemberImageTest):
+ """Test image members"""
@decorators.idempotent_id('5934c6ea-27dc-4d6e-9421-eeb5e045494a')
def test_image_share_accept(self):
+ """Test sharing and accepting an image"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -41,6 +43,7 @@
@decorators.idempotent_id('d9e83e5f-3524-4b38-a900-22abcb26e90e')
def test_image_share_reject(self):
+ """Test sharing and rejecting an image"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -57,6 +60,7 @@
@decorators.idempotent_id('a6ee18b9-4378-465e-9ad9-9a6de58a3287')
def test_get_image_member(self):
+ """Test getting image members after the image is accepted"""
image_id = self._create_image()
self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -75,6 +79,7 @@
@decorators.idempotent_id('72989bc7-2268-48ed-af22-8821e835c914')
def test_remove_image_member(self):
+ """Test removing image members after the image is accepted"""
image_id = self._create_image()
self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -89,10 +94,12 @@
@decorators.idempotent_id('634dcc3f-f6e2-4409-b8fd-354a0bb25d83')
def test_get_image_member_schema(self):
+ """Test getting image member schema"""
body = self.schemas_client.show_schema("member")
self.assertEqual("member", body['name'])
@decorators.idempotent_id('6ae916ef-1052-4e11-8d36-b3ae14853cbb')
def test_get_image_members_schema(self):
+ """Test getting image members schema"""
body = self.schemas_client.show_schema("members")
self.assertEqual("members", body['name'])
diff --git a/tempest/api/image/v2/test_images_member_negative.py b/tempest/api/image/v2/test_images_member_negative.py
index caa90f9..5f6f1ae 100644
--- a/tempest/api/image/v2/test_images_member_negative.py
+++ b/tempest/api/image/v2/test_images_member_negative.py
@@ -16,10 +16,12 @@
class ImagesMemberNegativeTest(base.BaseV2MemberImageTest):
+ """Negative tests of image members"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('b79efb37-820d-4cf0-b54c-308b00cf842c')
def test_image_share_invalid_status(self):
+ """Test updating image member status to invalid status should fail"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -32,6 +34,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('27002f74-109e-4a37-acd0-f91cd4597967')
def test_image_share_owner_cannot_accept(self):
+ """Test that image owner can't accept image shared to other member"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py b/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
index 80f8112..32b81b1 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
@@ -30,6 +30,7 @@
@decorators.idempotent_id('b1a3775e-3b5c-4f6a-a3b4-1ba3574ae718')
def test_create_update_delete_meta_namespace_objects(self):
+ """Test creating/updating/deleting image metadata namespace objects"""
# Create a namespace
namespace = self.create_namespace()
# Create a namespace object
@@ -52,6 +53,7 @@
@decorators.idempotent_id('a2a3615e-3b5c-3f6a-a2b1-1ba3574ae738')
def test_list_meta_namespace_objects(self):
+ """Test listing image metadata namespace objects"""
# Create a namespace object
namespace = self.create_namespace()
meta_namespace_object = self._create_namespace_object(namespace)
@@ -64,6 +66,7 @@
@decorators.idempotent_id('b1a3674e-3b4c-3f6a-a3b4-1ba3573ca768')
def test_show_meta_namespace_objects(self):
+ """Test showing image metadata namespace object"""
# Create a namespace object
namespace = self.create_namespace()
namespace_object = self._create_namespace_object(namespace)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py b/tempest/api/image/v2/test_images_metadefs_namespace_properties.py
index ed91726..1d4f0a6 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespace_properties.py
@@ -20,6 +20,7 @@
@decorators.idempotent_id('b1a3765e-3a5d-4f6d-a3a7-3ca3476ae768')
def test_basic_meta_def_namespace_property(self):
+ """Test operations of image metadata definition namespace property"""
# Get the available resource types and use one resource_type
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py b/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
index 482e808..dc64185 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
@@ -43,6 +43,7 @@
@decorators.idempotent_id('a2a3765e-3a6d-4f6d-a3a7-3cc3476aa876')
def test_create_list_delete_namespace_tags(self):
+ """Test creating/listing/deleting image metadata namespace tags"""
# Create a namespace
namespace = self.create_namespace()
self._create_namespace_tags(namespace)
@@ -62,6 +63,7 @@
@decorators.idempotent_id('a2a3765e-1a2c-3f6d-a3a7-3cc3466ab875')
def test_create_update_delete_tag(self):
+ """Test creating/updating/deleting image metadata namespace tag"""
# Create a namespace
namespace = self.create_namespace()
self._create_namespace_tags(namespace)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespaces.py b/tempest/api/image/v2/test_images_metadefs_namespaces.py
index f71b16c..502949f 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespaces.py
@@ -25,6 +25,7 @@
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
def test_basic_metadata_definition_namespaces(self):
+ """Test operations of image metadata definition namespaces"""
# get the available resource types and use one resource_type
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_metadefs_resource_types.py b/tempest/api/image/v2/test_images_metadefs_resource_types.py
index c60b3f7..6867f2d 100644
--- a/tempest/api/image/v2/test_images_metadefs_resource_types.py
+++ b/tempest/api/image/v2/test_images_metadefs_resource_types.py
@@ -22,6 +22,7 @@
@decorators.idempotent_id('6f358a4e-5ef0-11e6-a795-080027d0d606')
def test_basic_meta_def_resource_type_association(self):
+ """Test image resource type associations"""
# Get the available resource types and use one resource_type
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_metadefs_schema.py b/tempest/api/image/v2/test_images_metadefs_schema.py
index 95cc310..7dd36d2 100644
--- a/tempest/api/image/v2/test_images_metadefs_schema.py
+++ b/tempest/api/image/v2/test_images_metadefs_schema.py
@@ -18,64 +18,64 @@
class MetadataSchemaTest(base.BaseV2ImageTest):
- """Test to get metadata schema"""
+ """Test to get image metadata schema"""
@decorators.idempotent_id('e9e44891-3cb8-3b40-a532-e0a39fea3dab')
def test_get_metadata_namespace_schema(self):
- # Test to get namespace schema
+ """Test to get image namespace schema"""
body = self.schemas_client.show_schema("metadefs/namespace")
self.assertEqual("namespace", body['name'])
@decorators.idempotent_id('ffe44891-678b-3ba0-a3e2-e0a3967b3aeb')
def test_get_metadata_namespaces_schema(self):
- # Test to get namespaces schema
+ """Test to get image namespaces schema"""
body = self.schemas_client.show_schema("metadefs/namespaces")
self.assertEqual("namespaces", body['name'])
@decorators.idempotent_id('fde34891-678b-3b40-ae32-e0a3e67b6beb')
def test_get_metadata_resource_type_schema(self):
- # Test to get resource_type schema
+ """Test to get image resource_type schema"""
body = self.schemas_client.show_schema("metadefs/resource_type")
self.assertEqual("resource_type_association", body['name'])
@decorators.idempotent_id('dfe4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
def test_get_metadata_resources_types_schema(self):
- # Test to get resource_types schema
+ """Test to get image resource_types schema"""
body = self.schemas_client.show_schema("metadefs/resource_types")
self.assertEqual("resource_type_associations", body['name'])
@decorators.idempotent_id('dff4a891-b38b-3bf0-a3b2-e03ee67b3a3b')
def test_get_metadata_object_schema(self):
- # Test to get object schema
+ """Test to get image object schema"""
body = self.schemas_client.show_schema("metadefs/object")
self.assertEqual("object", body['name'])
@decorators.idempotent_id('dee4a891-b38b-3bf0-a3b2-e03ee67b3a3c')
def test_get_metadata_objects_schema(self):
- # Test to get objects schema
+ """Test to get image objects schema"""
body = self.schemas_client.show_schema("metadefs/objects")
self.assertEqual("objects", body['name'])
@decorators.idempotent_id('dae4a891-b38b-3bf0-a3b2-e03ee67b3a3d')
def test_get_metadata_property_schema(self):
- # Test to get property schema
+ """Test to get image property schema"""
body = self.schemas_client.show_schema("metadefs/property")
self.assertEqual("property", body['name'])
@decorators.idempotent_id('dce4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
def test_get_metadata_properties_schema(self):
- # Test to get properties schema
+ """Test to get image properties schema"""
body = self.schemas_client.show_schema("metadefs/properties")
self.assertEqual("properties", body['name'])
@decorators.idempotent_id('dde4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
def test_get_metadata_tag_schema(self):
- # Test to get tag schema
+ """Test to get image tag schema"""
body = self.schemas_client.show_schema("metadefs/tag")
self.assertEqual("tag", body['name'])
@decorators.idempotent_id('cde4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
def test_get_metadata_tags_schema(self):
- # Test to get tags schema
+ """Test to get image tags schema"""
body = self.schemas_client.show_schema("metadefs/tags")
self.assertEqual("tags", body['name'])
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index b4baf05..dc2bb96 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -36,7 +36,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
def test_get_non_existent_image(self):
- # get the non-existent image
+ """Get the non-existent image"""
non_existent_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_image,
non_existent_id)
@@ -44,14 +44,14 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
def test_get_image_null_id(self):
- # get image with image_id = NULL
+ """Get image with image_id = NULL"""
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.show_image, image_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
def test_get_delete_deleted_image(self):
- # get and delete the deleted image
+ """Get and delete the deleted image"""
# create and delete image
image = self.client.create_image(name='test',
container_format='bare',
@@ -70,7 +70,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
def test_delete_non_existing_image(self):
- # delete non-existent image
+ """Delete non-existent image"""
non_existent_image_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
non_existent_image_id)
@@ -78,7 +78,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
def test_delete_image_null_id(self):
- # delete image with image_id=NULL
+ """Delete image with image_id=NULL"""
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
image_id)
@@ -86,7 +86,10 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
def test_register_with_invalid_container_format(self):
- # Negative tests for invalid data supplied to POST /images
+ """Create image with invalid container format
+
+ Negative tests for invalid data supplied to POST /images
+ """
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='wrong',
disk_format='vhd')
@@ -94,6 +97,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
def test_register_with_invalid_disk_format(self):
+ """Create image with invalid disk format"""
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='bare',
disk_format='wrong')
@@ -101,7 +105,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ab980a34-8410-40eb-872b-f264752f46e5')
def test_delete_protected_image(self):
- # Create a protected image
+ """Create a protected image"""
image = self.create_image(protected=True)
self.addCleanup(self.client.update_image, image['id'],
[dict(replace="/protected", value=False)])
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index 601826e..163063c 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -18,9 +18,11 @@
class ImagesTagsTest(base.BaseV2ImageTest):
+ """Test image tags"""
@decorators.idempotent_id('10407036-6059-4f95-a2cd-cbbbee7ed329')
def test_update_delete_tags_for_image(self):
+ """Test adding and deleting image tags"""
image = self.create_image(container_format='bare',
disk_format='raw',
visibility='private')
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 440fa36..2db4a74 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -19,11 +19,12 @@
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
+ """Negative tests of image tags"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8cd30f82-6f9a-4c6e-8034-c1b51fba43d9')
def test_update_tags_for_non_existing_image(self):
- # Update tag with non existing image.
+ """Update image tag with non existing image"""
tag = data_utils.rand_name('tag')
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.add_image_tag,
@@ -32,7 +33,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('39c023a2-325a-433a-9eea-649bf1414b19')
def test_delete_non_existing_tag(self):
- # Delete non existing tag.
+ """Delete non existing image tag"""
image = self.create_image(container_format='bare',
disk_format='raw',
visibility='private'
diff --git a/tempest/api/image/v2/test_versions.py b/tempest/api/image/v2/test_versions.py
index 84f1068..ef91354 100644
--- a/tempest/api/image/v2/test_versions.py
+++ b/tempest/api/image/v2/test_versions.py
@@ -17,10 +17,12 @@
class VersionsTest(base.BaseV2ImageTest):
+ """Test image versions"""
@decorators.idempotent_id('659ea30a-a17c-4317-832c-0f68ed23c31d')
@decorators.attr(type='smoke')
def test_list_versions(self):
+ """Test listing image versions"""
versions = self.versions_client.list_versions()['versions']
expected_resources = ('id', 'links', 'status')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
deleted file mode 100644
index 5068fc4..0000000
--- a/tempest/api/network/admin/test_agent_management.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.network import base
-from tempest.common import tempest_fixtures as fixtures
-from tempest.common import utils
-from tempest.lib import decorators
-
-
-class AgentManagementTestJSON(base.BaseAdminNetworkTest):
-
- @classmethod
- def skip_checks(cls):
- super(AgentManagementTestJSON, cls).skip_checks()
- if not utils.is_extension_enabled('agent', 'network'):
- msg = "agent extension not enabled."
- raise cls.skipException(msg)
-
- @classmethod
- def resource_setup(cls):
- super(AgentManagementTestJSON, cls).resource_setup()
- body = cls.admin_agents_client.list_agents()
- agents = body['agents']
- cls.agent = agents[0]
-
- @decorators.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
- def test_list_agent(self):
- body = self.admin_agents_client.list_agents()
- agents = body['agents']
- # Hearthbeats must be excluded from comparison
- self.agent.pop('heartbeat_timestamp', None)
- self.agent.pop('configurations', None)
- for agent in agents:
- agent.pop('heartbeat_timestamp', None)
- agent.pop('configurations', None)
- self.assertIn(self.agent, agents)
-
- @decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
- def test_list_agents_non_admin(self):
- body = self.agents_client.list_agents()
- self.assertEmpty(body["agents"])
-
- @decorators.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
- def test_show_agent(self):
- body = self.admin_agents_client.show_agent(self.agent['id'])
- agent = body['agent']
- self.assertEqual(agent['id'], self.agent['id'])
-
- @decorators.idempotent_id('371dfc5b-55b9-4cb5-ac82-c40eadaac941')
- def test_update_agent_status(self):
- origin_status = self.agent['admin_state_up']
- # Try to update the 'admin_state_up' to the original
- # one to avoid the negative effect.
- agent_status = {'admin_state_up': origin_status}
- body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=agent_status)
- updated_status = body['agent']['admin_state_up']
- self.assertEqual(origin_status, updated_status)
-
- @decorators.idempotent_id('68a94a14-1243-46e6-83bf-157627e31556')
- def test_update_agent_description(self):
- self.useFixture(fixtures.LockFixture('agent_description'))
- description = 'description for update agent.'
- agent_description = {'description': description}
- body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=agent_description)
- self.addCleanup(self._restore_agent)
- updated_description = body['agent']['description']
- self.assertEqual(updated_description, description)
-
- def _restore_agent(self):
- """Restore the agent description after update test"""
-
- description = self.agent['description'] or ''
- origin_agent = {'description': description}
- self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=origin_agent)
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index 7e8cc8e..5bd3fce 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -36,6 +36,7 @@
body = self.admin_networks_client.create_network(**post_body)
network = body['network']
self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
self.admin_networks_client.delete_network, network['id'])
return network
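Note on the cleanup pattern: this hunk, and many of the network hunks that follow, wrap each addCleanup target in test_utils.call_and_ignore_notfound_exc so that teardown does not fail when the test has already deleted the resource itself. The helper simply swallows NotFound; a minimal sketch of that behaviour (not the exact tempest implementation) looks like this:

from tempest.lib import exceptions


def call_and_ignore_notfound_exc(func, *args, **kwargs):
    # Call func; if the resource is already gone and the call raises NotFound,
    # ignore it so cleanups can be registered unconditionally.
    try:
        return func(*args, **kwargs)
    except exceptions.NotFound:
        pass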
diff --git a/tempest/api/network/admin/test_external_networks_negative.py b/tempest/api/network/admin/test_external_networks_negative.py
index 0709d2a..da32f2d 100644
--- a/tempest/api/network/admin/test_external_networks_negative.py
+++ b/tempest/api/network/admin/test_external_networks_negative.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -50,5 +51,6 @@
# create a port which will internally create an instance-ip
self.assertRaises(lib_exc.Conflict,
self.admin_ports_client.create_port,
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=CONF.network.public_network_id,
fixed_ips=fixed_ips)
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index be0c4c6..adc4dda 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest import config
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -57,14 +58,18 @@
# Create floating ip from admin user
floating_ip_admin = self.admin_floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id)
- self.addCleanup(self.admin_floating_ips_client.delete_floatingip,
- floating_ip_admin['floatingip']['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_floating_ips_client.delete_floatingip,
+ floating_ip_admin['floatingip']['id'])
# Create floating ip from alt user
body = self.alt_floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id)
floating_ip_alt = body['floatingip']
- self.addCleanup(self.alt_floating_ips_client.delete_floatingip,
- floating_ip_alt['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.alt_floating_ips_client.delete_floatingip,
+ floating_ip_alt['id'])
# List floating ips from admin
body = self.admin_floating_ips_client.list_floatingips()
floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
@@ -91,8 +96,10 @@
tenant_id=self.network['tenant_id'],
port_id=self.port['id'])
created_floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
- created_floating_ip['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
+ created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
deleted file mode 100644
index 033bf55..0000000
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.network import base
-from tempest.common import utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-AGENT_TYPE = 'L3 agent'
-AGENT_MODES = (
- 'legacy',
- 'dvr_snat'
-)
-
-
-class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
- """Tests the following operations in the Neutron API:
-
- List routers that the given L3 agent is hosting.
- List L3 agents hosting the given router.
- Add and Remove Router to L3 agent
-
- v2.0 of the Neutron API is assumed.
-
- The l3_agent_scheduler extension is required for these tests.
- """
-
- @classmethod
- def skip_checks(cls):
- super(L3AgentSchedulerTestJSON, cls).skip_checks()
- if not utils.is_extension_enabled('l3_agent_scheduler', 'network'):
- msg = "L3 Agent Scheduler Extension not enabled."
- raise cls.skipException(msg)
-
- @classmethod
- def resource_setup(cls):
- super(L3AgentSchedulerTestJSON, cls).resource_setup()
- agents = cls.admin_agents_client.list_agents(
- agent_type=AGENT_TYPE)['agents']
- for agent in agents:
- if (agent['configurations']['agent_mode'] in AGENT_MODES and
- agent['alive']):
- cls.agent = agent
- break
- else:
- msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
- raise exceptions.InvalidConfiguration(msg)
- cls.router = cls.create_router()
-
- @decorators.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
- def test_list_routers_on_l3_agent(self):
- self.admin_agents_client.list_routers_on_l3_agent(self.agent['id'])
-
- @decorators.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
- def test_add_list_remove_router_on_l3_agent(self):
- l3_agent_ids = list()
- self.admin_agents_client.create_router_on_l3_agent(
- self.agent['id'],
- router_id=self.router['id'])
- body = (
- self.admin_routers_client.list_l3_agents_hosting_router(
- self.router['id']))
- for agent in body['agents']:
- l3_agent_ids.append(agent['id'])
- self.assertIn('agent_type', agent)
- self.assertEqual('L3 agent', agent['agent_type'])
- self.assertIn(self.agent['id'], l3_agent_ids)
- body = self.admin_agents_client.delete_router_from_l3_agent(
- self.agent['id'],
- self.router['id'])
- # NOTE(afazekas): The deletion not asserted, because neutron
- # is not forbidden to reschedule the router to the same agent
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index e79f8c3..0db038d 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -14,7 +14,10 @@
# under the License.
from tempest.api.network import base
+from tempest.common import identity
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -28,9 +31,8 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
- force_tenant_isolation = True
@classmethod
def skip_checks(cls):
@@ -39,27 +41,39 @@
msg = "quotas extension not enabled."
raise cls.skipException(msg)
+ def setUp(self):
+ super(QuotasNegativeTest, self).setUp()
+ name = data_utils.rand_name('test_project_')
+ description = data_utils.rand_name('desc_')
+ self.project = identity.identity_utils(self.os_admin).create_project(
+ name=name, description=description)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ self.project['id'])
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
def test_network_quota_exceeding(self):
# Set the network quota to two
- self.admin_quotas_client.update_quotas(self.networks_client.tenant_id,
- network=2)
- self.addCleanup(self.admin_quotas_client.reset_quotas,
- self.networks_client.tenant_id)
+ self.admin_quotas_client.update_quotas(self.project['id'], network=2)
# Create two networks
- n1 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n1 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_networks_client.delete_network,
n1['network']['id'])
- n2 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n2 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_networks_client.delete_network,
n2['network']['id'])
# Try to create a third network while the quota is two
with self.assertRaisesRegex(
lib_exc.Conflict,
r"Quota exceeded for resources: \['network'\].*"):
- n3 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n3 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_networks_client.delete_network,
n3['network']['id'])
diff --git a/tempest/api/network/admin/test_ports.py b/tempest/api/network/admin/test_ports.py
index 05363db..289e577 100644
--- a/tempest/api/network/admin/test_ports.py
+++ b/tempest/api/network/admin/test_ports.py
@@ -14,8 +14,14 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
@@ -28,26 +34,35 @@
def resource_setup(cls):
super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
- hyper_list = cls.hyper_client.list_hypervisors()
- cls.host_id = hyper_list['hypervisors'][0]['hypervisor_hostname']
+ if CONF.service_available.nova:
+ hyper_list = cls.hyper_client.list_hypervisors()
+ cls.host_id = hyper_list['hypervisors'][0]['hypervisor_hostname']
@decorators.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
+ @utils.services('compute')
def test_create_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id'],
- "binding:host_id": self.host_id}
+ "binding:host_id": self.host_id,
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
- self.addCleanup(self.admin_ports_client.delete_port, port['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_ports_client.delete_port, port['id'])
host_id = port['binding:host_id']
self.assertIsNotNone(host_id)
self.assertEqual(self.host_id, host_id)
@decorators.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
+ @utils.services('compute')
def test_update_port_binding_ext_attr(self):
- post_body = {"network_id": self.network['id']}
+ post_body = {"network_id": self.network['id'],
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
- self.addCleanup(self.admin_ports_client.delete_port, port['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_ports_client.delete_port, port['id'])
update_body = {"binding:host_id": self.host_id}
body = self.admin_ports_client.update_port(port['id'], **update_body)
updated_port = body['port']
@@ -56,12 +71,16 @@
self.assertEqual(self.host_id, host_id)
@decorators.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
+ @utils.services('compute')
def test_list_ports_binding_ext_attr(self):
# Create a new port
- post_body = {"network_id": self.network['id']}
+ post_body = {"network_id": self.network['id'],
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
- self.addCleanup(self.admin_ports_client.delete_port, port['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_ports_client.delete_port, port['id'])
# Update the port's binding attributes so that is now 'bound'
# to a host
@@ -83,9 +102,11 @@
@decorators.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
def test_show_port_binding_ext_attr(self):
body = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=self.network['id'])
port = body['port']
- self.addCleanup(self.admin_ports_client.delete_port, port['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_ports_client.delete_port, port['id'])
body = self.admin_ports_client.show_port(port['id'])
show_port = body['port']
self.assertEqual(port['binding:host_id'],
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index b1e4a58..ef5ebb6 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -35,7 +35,7 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
@classmethod
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index a7355f3..41f97d8 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -20,6 +20,7 @@
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -38,7 +39,8 @@
# associate a cleanup with created routers to avoid quota limits
router = self.create_router(name, admin_state_up,
external_network_id, enable_snat)
- self.addCleanup(self._cleanup_router, router)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._cleanup_router, router)
return router
@classmethod
@@ -62,7 +64,8 @@
name = data_utils.rand_name('router-')
create_body = self.admin_routers_client.create_router(
name=name, tenant_id=project_id)
- self.addCleanup(self.admin_routers_client.delete_router,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_routers_client.delete_router,
create_body['router']['id'])
self.assertEqual(project_id, create_body['router']['tenant_id'])
@@ -92,7 +95,8 @@
'enable_snat': enable_snat}
create_body = self.admin_routers_client.create_router(
name=name, external_gateway_info=external_gateway_info)
- self.addCleanup(self.admin_routers_client.delete_router,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_routers_client.delete_router,
create_body['router']['id'])
# Verify snat attributes after router creation
self._verify_router_gateway(create_body['router']['id'],
@@ -111,7 +115,8 @@
def _verify_gateway_port(self, router_id):
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
- device_id=router_id)
+ device_id=router_id,
+ device_owner="network:router_gateway")
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
@@ -207,6 +212,42 @@
'enable_snat': False})
self._verify_gateway_port(router['id'])
+ @decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
+ def test_create_router_set_gateway_with_fixed_ip(self):
+ # First create an external network, then create and delete a port on it
+ # to obtain a free fixed IP to use as the router's gateway address
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network_1 = self.admin_networks_client.create_network(
+ name=network_name, **{'router:external': True})['network']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_networks_client.delete_network,
+ network_1['id'])
+ subnet = self.create_subnet(
+ network_1, client=self.admin_subnets_client, enable_dhcp=False)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_subnets_client.delete_subnet, subnet['id'])
+ port = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
+ network_id=network_1['id'])['port']
+ self.admin_ports_client.delete_port(port_id=port['id'])
+ fixed_ip = {
+ 'subnet_id': port['fixed_ips'][0]['subnet_id'],
+ 'ip_address': port['fixed_ips'][0]['ip_address']
+ }
+ external_gateway_info = {
+ 'network_id': network_1['id'],
+ 'external_fixed_ips': [fixed_ip]
+ }
+ # Create a router and set gateway to fixed_ip
+ router = self.admin_routers_client.create_router(
+ external_gateway_info=external_gateway_info)['router']
+ self.admin_routers_client.delete_router(router['id'])
+ # Verify the router's gateway IP is equal to fixed_ip
+ self.assertEqual(router['external_gateway_info'][
+ 'external_fixed_ips'][0]['ip_address'],
+ fixed_ip['ip_address'])
+
class RoutersIpV6AdminTest(RoutersAdminTest):
_ip_version = 6
diff --git a/tempest/api/network/admin/test_routers_dvr.py b/tempest/api/network/admin/test_routers_dvr.py
index 93478e6..270f802 100644
--- a/tempest/api/network/admin/test_routers_dvr.py
+++ b/tempest/api/network/admin/test_routers_dvr.py
@@ -18,6 +18,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -62,7 +63,8 @@
name = data_utils.rand_name('router')
router = self.admin_routers_client.create_router(name=name,
distributed=True)
- self.addCleanup(self.admin_routers_client.delete_router,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_routers_client.delete_router,
router['router']['id'])
self.assertTrue(router['router']['distributed'])
@@ -82,7 +84,8 @@
name = data_utils.rand_name('router')
router = self.admin_routers_client.create_router(name=name,
distributed=False)
- self.addCleanup(self.admin_routers_client.delete_router,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_routers_client.delete_router,
router['router']['id'])
self.assertFalse(router['router']['distributed'])
@@ -112,8 +115,8 @@
ha=False,
tenant_id=tenant_id)
router_id = router['router']['id']
- self.addCleanup(self.admin_routers_client.delete_router,
- router_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_routers_client.delete_router, router_id)
self.assertFalse(router['router']['distributed'])
router = self.admin_routers_client.update_router(
router_id, distributed=True)
diff --git a/tempest/api/network/admin/test_routers_negative.py b/tempest/api/network/admin/test_routers_negative.py
index 9356bcc..f605945 100644
--- a/tempest/api/network/admin/test_routers_negative.py
+++ b/tempest/api/network/admin/test_routers_negative.py
@@ -18,6 +18,8 @@
from tempest.api.network import base
from tempest.common import utils
from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -41,8 +43,10 @@
def test_router_set_gateway_used_ip_returns_409(self):
# At first create a address from public_network_id
port = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=CONF.network.public_network_id)['port']
- self.addCleanup(self.admin_ports_client.delete_port,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_ports_client.delete_port,
port_id=port['id'])
# Add used ip and subnet_id in external_fixed_ips
fixed_ip = {
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 9032fdc..b6bf369 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -156,6 +156,8 @@
@classmethod
def create_port(cls, network, **kwargs):
"""Wrapper utility that returns a test port."""
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(cls.__name__)
body = cls.ports_client.create_port(network_id=network['id'],
**kwargs)
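The wrapper above now defaults the port name to a random name based on the test class, the same convention the individual tests in this change adopt with explicit name=data_utils.rand_name(self.__class__.__name__) arguments, so leaked ports can be traced back to the test that created them. A minimal sketch of the defaulting, with an illustrative helper name:

from tempest.lib.common.utils import data_utils


def with_default_port_name(test_cls, **kwargs):
    # Only fill in 'name' when the caller did not choose one; rand_name
    # appends a random suffix to the given base name.
    kwargs.setdefault('name', data_utils.rand_name(test_cls.__name__))
    return kwargs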
diff --git a/tempest/api/network/base_security_groups.py b/tempest/api/network/base_security_groups.py
index b8d677a..32f2cdd 100644
--- a/tempest/api/network/base_security_groups.py
+++ b/tempest/api/network/base_security_groups.py
@@ -15,6 +15,7 @@
from tempest.api.network import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
class BaseSecGroupTest(base.BaseNetworkTest):
@@ -24,7 +25,8 @@
name = data_utils.rand_name('secgroup-')
group_create_body = (
self.security_groups_client.create_security_group(name=name))
- self.addCleanup(self._delete_security_group,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_security_group,
group_create_body['security_group']['id'])
self.assertEqual(group_create_body['security_group']['name'], name)
return group_create_body, name
diff --git a/tempest/api/network/test_agent_management_negative.py b/tempest/api/network/test_agent_management_negative.py
new file mode 100644
index 0000000..d1c02ce
--- /dev/null
+++ b/tempest/api/network/test_agent_management_negative.py
@@ -0,0 +1,28 @@
+# Copyright 2018 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.lib import decorators
+
+
+class AgentManagementNegativeTest(base.BaseNetworkTest):
+
+ @decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
+ @decorators.attr(type=['negative'])
+ def test_list_agents_non_admin(self):
+ """Validate that non-admin user cannot list agents."""
+ # Listing agents requires admin_only permissions.
+ body = self.agents_client.list_agents()
+ self.assertEmpty(body["agents"])
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index dec3413..639defb 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -17,6 +17,8 @@
from tempest.api.network import base
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -60,9 +62,11 @@
'mac_address': self.mac_address}]
body = self.ports_client.create_port(
network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
allowed_address_pairs=allowed_address_pairs)
port_id = body['port']['id']
- self.addCleanup(self.ports_client.delete_port, port_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_id)
# Confirm port was created with allowed address pair attribute
body = self.ports_client.list_ports()
@@ -74,9 +78,12 @@
def _update_port_with_address(self, address, mac_address=None, **kwargs):
# Create a port without allowed address pair
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = body['port']['id']
- self.addCleanup(self.ports_client.delete_port, port_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_id)
if mac_address is None:
mac_address = self.mac_address
@@ -104,9 +111,12 @@
@decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
# Create an ip _address and mac_address through port create
- resp = self.ports_client.create_port(network_id=self.network['id'])
+ resp = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
newportid = resp['port']['id']
- self.addCleanup(self.ports_client.delete_port, newportid)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, newportid)
ipaddress = resp['port']['fixed_ips'][0]['ip_address']
macaddress = resp['port']['mac_address']
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 0730d58..eb31ed3 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -135,7 +135,7 @@
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
- ('Real port IP %s shall be equal to EUI-64 %s'
+ ('Real port IP %s shall be equal to EUI-64 %s '
'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
real_ip, eui_ip,
ra_mode if ra_mode else "Off",
@@ -167,7 +167,7 @@
self._clean_network()
self.assertNotEqual(eui_ip, real_ip,
('Real port IP %s equal to EUI-64 %s when '
- 'ipv6_ra_mode=Off and ipv6_address_mode=Off,'
+ 'ipv6_ra_mode=Off and ipv6_address_mode=Off, '
'but shall be taken from fixed IPs') % (
real_ip, eui_ip))
@@ -206,7 +206,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self.ports_client.delete_port(port['id'])
self.ports.pop()
body = self.ports_client.list_ports()
@@ -257,7 +257,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
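The test_dhcp_ipv6 hunks above only fix message spacing and continuation-line indentation, but the assertions they touch compare a port's real fixed IP against the EUI-64 (SLAAC) address derived from the subnet prefix and the port's MAC. For reference, a self-contained sketch of the modified EUI-64 interface identifier (the helper name is illustrative, not tempest's):

def eui64_interface_id(mac):
    # Insert ff:fe between the OUI and NIC halves of the MAC and flip the
    # universal/local bit of the first octet (RFC 4291, Appendix A).
    octets = [int(x, 16) for x in mac.split(':')]
    octets[0] ^= 0x02
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    return ':'.join('%02x%02x' % (eui64[i], eui64[i + 1])
                    for i in range(0, 8, 2))


# A port with MAC fa:16:3e:12:34:56 on prefix 2001:db8::/64 is expected to
# take the address 2001:db8::f816:3eff:fe12:3456.
assert eui64_interface_id('fa:16:3e:12:34:56') == 'f816:3eff:fe12:3456'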
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index 0d42033..d363081 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -60,9 +61,11 @@
# Create a port with Extra DHCP Options
body = self.ports_client.create_port(
network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
- self.addCleanup(self.ports_client.delete_port, port_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
body = self.ports_client.list_ports()
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 504bfa8..aaa5497 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -18,6 +18,7 @@
from tempest.common.utils import data_utils
from tempest.common.utils import net_utils
from tempest import config
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -77,8 +78,10 @@
floating_network_id=self.ext_net_id,
port_id=self.ports[0]['id'])
created_floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
- created_floating_ip['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
+ created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
@@ -125,16 +128,23 @@
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
+ # Explicitly test deletion of the floating IP
+ self.floating_ips_client.delete_floatingip(created_floating_ip['id'])
+
@decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
def test_floating_ip_delete_port(self):
# Create a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id)
created_floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
- created_floating_ip['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
+ created_floating_ip['id'])
# Create a port
- port = self.ports_client.create_port(network_id=self.network['id'])
+ port = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
created_port = port['port']
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
@@ -158,24 +168,36 @@
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'])
created_floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
- created_floating_ip['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
+ created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
network_name = data_utils.rand_name(self.__class__.__name__)
network2 = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
- network2['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network,
+ network2['id'])
subnet2 = self.create_subnet(network2)
- self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet2['id'])
router2 = self.create_router(external_network_id=self.ext_net_id)
- self.addCleanup(self.routers_client.delete_router, router2['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.delete_router, router2['id'])
self.create_router_interface(router2['id'], subnet2['id'])
- self.addCleanup(self.routers_client.remove_router_interface,
- router2['id'], subnet_id=subnet2['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.remove_router_interface,
+ router2['id'], subnet_id=subnet2['id'])
port_other_router = self.create_port(network2)
- self.addCleanup(self.ports_client.delete_port,
- port_other_router['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port,
+ port_other_router['id'])
# Associate floating IP to the other port on another router
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
@@ -194,8 +216,10 @@
port_id=self.ports[1]['id'],
fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
created_floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
- created_floating_ip['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
+ created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertEqual(created_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
@@ -215,17 +239,21 @@
2)
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
# Create port
- body = self.ports_client.create_port(network_id=self.network['id'],
- fixed_ips=fixed_ips)
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
port = body['port']
- self.addCleanup(self.ports_client.delete_port, port['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port['id'])
# Create floating ip
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=list_ips[0])
floating_ip = body['floatingip']
- self.addCleanup(self.floating_ips_client.delete_floatingip,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.floating_ips_client.delete_floatingip,
floating_ip['id'])
self.assertIsNotNone(floating_ip['id'])
self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
diff --git a/tempest/api/network/test_floating_ips_negative.py b/tempest/api/network/test_floating_ips_negative.py
index e904a81..1688c9d 100644
--- a/tempest/api/network/test_floating_ips_negative.py
+++ b/tempest/api/network/test_floating_ips_negative.py
@@ -17,6 +17,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest import config
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -81,6 +82,7 @@
floating_network_id=self.ext_net_id)
floating_ip = body['floatingip']
self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
self.floating_ips_client.delete_floatingip, floating_ip['id'])
# Associate floating IP to the other port
self.assertRaises(
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7345fd1..eba1f6c 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -160,7 +160,8 @@
def test_create_update_delete_network_subnet(self):
# Create a network
network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network['id'])
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
@@ -176,6 +177,8 @@
body = self.subnets_client.update_subnet(subnet_id, name=new_name)
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_name)
+ # Verify network delete
+ self.networks_client.delete_network(network['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
@@ -313,11 +316,12 @@
@decorators.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network['id'])
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
- 'dns_nameservers',
+ 'dns_nameservers',
'allocation_pools']))
subnet_id = subnet['id']
new_gateway = str(netaddr.IPAddress(
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index bc4f41f..3af67dd 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -79,7 +79,8 @@
non_exist_net_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.ports_client.create_port,
- network_id=non_exist_net_id)
+ network_id=non_exist_net_id,
+ name=data_utils.rand_name(self.__class__.__name__))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('cf8eef21-4351-4f53-adcd-cc5cb1e76b92')
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 2c9159c..10121de 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -23,6 +23,7 @@
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -52,7 +53,8 @@
def _create_subnet(self, network, gateway='',
cidr=None, mask_bits=None, **kwargs):
subnet = self.create_subnet(network, gateway, cidr, mask_bits)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet['id'])
return subnet
def _create_network(self, network_name=None, **kwargs):
@@ -60,7 +62,8 @@
self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name, **kwargs)['network']
- self.addCleanup(self.networks_client.delete_network,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network,
network['id'])
return network
@@ -68,7 +71,9 @@
@decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
# Verify port creation
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
@@ -107,7 +112,7 @@
address = self.cidr
address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
- (address.version == 6 and address.prefixlen >= 126)):
+ (address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
@@ -115,14 +120,18 @@
self._create_subnet(network, cidr=address,
mask_bits=address.prefixlen,
**allocation_pools)
- body = self.ports_client.create_port(network_id=net_id)
- self.addCleanup(self.ports_client.delete_port, body['port']['id'])
+ body = self.ports_client.create_port(
+ network_id=net_id,
+ name=data_utils.rand_name(self.__class__.__name__))
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, body['port']['id'])
port = body['port']
ip_address = port['fixed_ips'][0]['ip_address']
start_ip_address = allocation_pools['allocation_pools'][0]['start']
end_ip_address = allocation_pools['allocation_pools'][0]['end']
ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
self.assertIn(ip_address, ip_range)
+ self.ports_client.delete_port(port['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
@@ -167,10 +176,16 @@
network = self._create_network()
self._create_subnet(network)
# Create two ports
- port_1 = self.ports_client.create_port(network_id=network['id'])
- self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
- port_2 = self.ports_client.create_port(network_id=network['id'])
- self.addCleanup(self.ports_client.delete_port, port_2['port']['id'])
+ port_1 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_1['port']['id'])
+ port_2 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_2['port']['id'])
# List ports filtered by fixed_ips
port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
fixed_ips = 'ip_address=' + port_1_fixed_ip
@@ -182,14 +197,14 @@
'Ports from multiple tenants are in the list resp')
port_ids = [port['id'] for port in ports]
fixed_ips = [port['fixed_ips'] for port in ports]
- port_ips = []
- for addr in fixed_ips:
- port_ips.extend([port['ip_address'] for port in addr])
-
port_net_ids = [port['network_id'] for port in ports]
self.assertIn(port_1['port']['id'], port_ids)
- self.assertIn(port_1_fixed_ip, port_ips)
self.assertIn(network['id'], port_net_ids)
+ # Check that every port has a fixed_ip that matches the query
+ for addr in fixed_ips:
+ port_ips = [port['ip_address'] for port in addr]
+ self.assertIn(port_1_fixed_ip, port_ips,
+ 'Port not matching IP filter found')
@decorators.idempotent_id('79895408-85d5-460d-94e7-9531c5fd9123')
@testtools.skipUnless(
@@ -217,13 +232,19 @@
# Create two ports
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_1}]
- port_1 = self.ports_client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips)
- self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
+ port_1 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_1['port']['id'])
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_2}]
- port_2 = self.ports_client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips)
- self.addCleanup(self.ports_client.delete_port, port_2['port']['id'])
+ port_2 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port_2['port']['id'])
# Scenario 1: List port1 (port2 is filtered out)
if ip_address_1[:-1] != ip_address_2[:-1]:
@@ -272,12 +293,16 @@
network = self._create_network()
self._create_subnet(network)
router = self.create_router()
- self.addCleanup(self.routers_client.delete_router, router['id'])
- port = self.ports_client.create_port(network_id=network['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.delete_router, router['id'])
+ port = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
# Add router interface to port created above
self.routers_client.add_router_interface(router['id'],
port_id=port['port']['id'])
- self.addCleanup(self.routers_client.remove_router_interface,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.remove_router_interface,
router['id'], port_id=port['port']['id'])
# List ports filtered by router_id
port_list = self.ports_client.list_ports(device_id=router['id'])
@@ -311,7 +336,8 @@
# Create a port with multiple IP addresses
port = self.create_port(network,
fixed_ips=fixed_ips)
- self.addCleanup(self.ports_client.delete_port, port['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port['id'])
self.assertEqual(2, len(port['fixed_ips']))
check_fixed_ips = [subnet_1['id'], subnet_2['id']]
for item in port['fixed_ips']:
@@ -334,7 +360,8 @@
for name in security_groups_names:
group_create_body = sec_grps_client.create_security_group(
name=name)
- self.addCleanup(self.security_groups_client.delete_security_group,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.security_groups_client.delete_security_group,
group_create_body['security_group']['id'])
security_groups_list.append(group_create_body['security_group']
['id'])
@@ -342,25 +369,28 @@
sec_grp_name = data_utils.rand_name('secgroup')
security_group = sec_grps_client.create_security_group(
name=sec_grp_name)
- self.addCleanup(self.security_groups_client.delete_security_group,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.security_groups_client.delete_security_group,
security_group['security_group']['id'])
post_body = {
- "name": data_utils.rand_name('port-'),
+ "name": data_utils.rand_name(self.__class__.__name__),
"security_groups": [security_group['security_group']['id']],
"network_id": self.network['id'],
"admin_state_up": True,
"fixed_ips": fixed_ip_1}
body = self.ports_client.create_port(**post_body)
- self.addCleanup(self.ports_client.delete_port, body['port']['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, body['port']['id'])
port = body['port']
# Update the port with security groups
subnet_2 = self.create_subnet(self.network)
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
- update_body = {"name": data_utils.rand_name('port-'),
- "admin_state_up": False,
- "fixed_ips": fixed_ip_2,
- "security_groups": security_groups_list}
+ update_body = {
+ "name": data_utils.rand_name(self.__class__.__name__),
+ "admin_state_up": False,
+ "fixed_ips": fixed_ip_2,
+ "security_groups": security_groups_list}
body = self.ports_client.update_port(port['id'], **update_body)
port_show = body['port']
# Verify the security groups and other attributes updated to port
@@ -395,14 +425,19 @@
@decorators.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
def test_create_show_delete_port_user_defined_mac(self):
# Create a port for a legal mac
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
old_port = body['port']
free_mac_address = old_port['mac_address']
self.ports_client.delete_port(old_port['id'])
# Create a new port with user defined mac
- body = self.ports_client.create_port(network_id=self.network['id'],
- mac_address=free_mac_address)
- self.addCleanup(self.ports_client.delete_port, body['port']['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ mac_address=free_mac_address,
+ name=data_utils.rand_name(self.__class__.__name__))
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, body['port']['id'])
port = body['port']
body = self.ports_client.show_port(port['id'])
show_port = body['port']
@@ -418,7 +453,8 @@
network = self._create_network()
self._create_subnet(network)
port = self.create_port(network, security_groups=[])
- self.addCleanup(self.ports_client.delete_port, port['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port['id'])
self.assertIsNotNone(port['security_groups'])
self.assertEmpty(port['security_groups'])
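The cleanups above now go through test_utils.call_and_ignore_notfound_exc so that a delete repeated during teardown, after the test body has already removed the resource, becomes a no-op instead of an error. A minimal standalone sketch of such a wrapper (the real helper lives in tempest.lib.common.utils.test_utils; the NotFound class here is a stand-in for tempest.lib.exceptions.NotFound)::

    class NotFound(Exception):
        """Stand-in for tempest.lib.exceptions.NotFound."""


    def call_and_ignore_notfound_exc(func, *args, **kwargs):
        """Call func and swallow a NotFound raised by it.

        Useful with addCleanup(): if the test already deleted the
        resource, the duplicate delete in teardown silently passes.
        """
        try:
            return func(*args, **kwargs)
        except NotFound:
            pass


    # Typical use inside a test:
    #   self.addCleanup(call_and_ignore_notfound_exc,
    #                   self.ports_client.delete_port, port['id'])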
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 8b03631..30423e3 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -20,6 +20,7 @@
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -27,22 +28,6 @@
class RoutersTest(base.BaseNetworkTest):
- def _cleanup_router(self, router):
- self.delete_router(router)
-
- def _create_router(self, name=None, admin_state_up=False,
- external_network_id=None, enable_snat=None):
- # associate a cleanup with created routers to avoid quota limits
- router = self.create_router(name, admin_state_up,
- external_network_id, enable_snat)
- self.addCleanup(self._cleanup_router, router)
- return router
-
- def _create_subnet(self, network, gateway='', cidr=None):
- subnet = self.create_subnet(network, gateway, cidr)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
- return subnet
-
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
interface = self.routers_client.add_router_interface(
router_id, subnet_id=subnet_id)
@@ -70,10 +55,11 @@
def test_create_show_list_update_delete_router(self):
# Create a router
router_name = data_utils.rand_name(self.__class__.__name__ + '-router')
- router = self._create_router(
- name=router_name,
+ router = self.create_router(
+ router_name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
+ self.addCleanup(self.delete_router, router)
self.assertEqual(router['name'], router_name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
@@ -104,10 +90,13 @@
network_name = data_utils.rand_name(self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
- network['id'])
- subnet = self._create_subnet(network)
- router = self._create_router()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network['id'])
+ subnet = self.create_subnet(network)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
# Add router interface with subnet id
interface = self.routers_client.add_router_interface(
router['id'], subnet_id=subnet['id'])
@@ -127,17 +116,21 @@
network_name = data_utils.rand_name(self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
- network['id'])
- self._create_subnet(network)
- router = self._create_router()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network['id'])
+ subnet = self.create_subnet(network)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
port_body = self.ports_client.create_port(
- network_id=network['id'])
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
# add router interface to port created above
interface = self.routers_client.add_router_interface(
router['id'],
port_id=port_body['port']['id'])
- self.addCleanup(self.routers_client.remove_router_interface,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.remove_router_interface,
router['id'], port_id=port_body['port']['id'])
self.assertIn('subnet_id', interface.keys())
self.assertIn('port_id', interface.keys())
@@ -146,37 +139,8 @@
interface['port_id'])
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
-
- @decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
- @utils.requires_ext(extension='ext-gw-mode', service='network')
- @testtools.skipUnless(CONF.network.public_network_id,
- 'The public_network_id option must be specified.')
- @decorators.skip_because(bug='1676207')
- def test_create_router_set_gateway_with_fixed_ip(self):
- # Don't know public_network_address, so at first create address
- # from public_network and delete
- port = self.admin_ports_client.create_port(
- network_id=CONF.network.public_network_id)['port']
- self.admin_ports_client.delete_port(port_id=port['id'])
-
- fixed_ip = {
- 'subnet_id': port['fixed_ips'][0]['subnet_id'],
- 'ip_address': port['fixed_ips'][0]['ip_address']
- }
- external_gateway_info = {
- 'network_id': CONF.network.public_network_id,
- 'external_fixed_ips': [fixed_ip]
- }
-
- # Create a router and set gateway to fixed_ip
- router = self.admin_routers_client.create_router(
- external_gateway_info=external_gateway_info)['router']
- self.addCleanup(self.admin_routers_client.delete_router,
- router_id=router['id'])
- # Examine router's gateway is equal to fixed_ip
- self.assertEqual(router['external_gateway_info'][
- 'external_fixed_ips'][0]['ip_address'],
- fixed_ip['ip_address'])
+ self.routers_client.remove_router_interface(
+ router['id'], port_id=port_body['port']['id'])
@decorators.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
@utils.requires_ext(extension='extraroute', service='network')
@@ -188,7 +152,8 @@
test_routes = []
routes_num = 4
# Create a router
- router = self._create_router(admin_state_up=True)
+ router = self.create_router(admin_state_up=True)
+ self.addCleanup(self.delete_router, router)
self.addCleanup(
self._delete_extra_routes,
router['id'])
@@ -198,9 +163,12 @@
network_name = data_utils.rand_name(self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network,
network['id'])
subnet = self.create_subnet(network, cidr=next_cidr)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet['id'])
next_cidr = next_cidr.next()
# Add router interface with subnet id
@@ -247,7 +215,8 @@
@decorators.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
def test_update_router_admin_state(self):
- router = self._create_router()
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
self.assertFalse(router['admin_state_up'])
# Update router admin state
update_body = self.routers_client.update_router(router['id'],
@@ -262,16 +231,22 @@
network_name = data_utils.rand_name(self.__class__.__name__)
network01 = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
- network01['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network01['id'])
+ network_name = data_utils.rand_name(self.__class__.__name__)
network02 = self.networks_client.create_network(
- name=data_utils.rand_name(self.__class__.__name__))['network']
- self.addCleanup(self.networks_client.delete_network,
- network02['id'])
- subnet01 = self._create_subnet(network01)
+ name=network_name)['network']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network02['id'])
+ subnet01 = self.create_subnet(network01)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet01['id'])
sub02_cidr = self.cidr.next()
- subnet02 = self._create_subnet(network02, cidr=sub02_cidr)
- router = self._create_router()
+ subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet02['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
@@ -286,10 +261,13 @@
network_name = data_utils.rand_name(self.__class__.__name__)
network = self.networks_client.create_network(
name=network_name)['network']
- self.addCleanup(self.networks_client.delete_network,
- network['id'])
- subnet = self._create_subnet(network)
- router = self._create_router()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.networks_client.delete_network, network['id'])
+ subnet = self.create_subnet(network)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
fixed_ip = [{'subnet_id': subnet['id']}]
interface = self._add_router_interface_with_subnet_id(router['id'],
subnet['id'])
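The router tests above now register cleanups directly with addCleanup instead of the removed _create_router/_cleanup_router helpers. addCleanup callbacks run in LIFO order, which is what keeps the teardown safe: the interface removal registered last runs before the router deletion registered first. A small self-contained illustration of that ordering::

    import unittest


    class CleanupOrderTest(unittest.TestCase):
        """Show that addCleanup callbacks run in LIFO order."""

        def test_order(self):
            events = []
            self.addCleanup(events.append, 'delete router')     # registered first
            self.addCleanup(events.append, 'remove interface')  # registered last
            self.doCleanups()  # run the registered cleanups now
            self.assertEqual(['remove interface', 'delete router'], events)


    if __name__ == '__main__':
        unittest.main()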
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index ffc1fca..ef19122 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -16,6 +16,7 @@
from tempest.api.network import base_security_groups as base
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -49,8 +50,8 @@
)
sec_group_rule = rule_create_body['security_group_rule']
- self.addCleanup(self._delete_security_group_rule,
- sec_group_rule['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_security_group_rule, sec_group_rule['id'])
expected = {'direction': direction, 'protocol': protocol,
'ethertype': ethertype, 'port_range_min': port_range_min,
@@ -104,6 +105,8 @@
self.assertEqual(show_body['security_group']['name'], new_name)
self.assertEqual(show_body['security_group']['description'],
new_description)
+ # Delete security group
+ self._delete_security_group(group_create_body['security_group']['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
@@ -138,6 +141,8 @@
for rule in rule_list_body['security_group_rules']]
self.assertIn(rule_create_body['security_group_rule']['id'],
rule_list)
+ self._delete_security_group_rule(
+ rule_create_body['security_group_rule']['id'])
@decorators.idempotent_id('87dfbcf9-1849-43ea-b1e4-efa3eeae9f71')
def test_create_security_group_rule_with_additional_args(self):
@@ -170,7 +175,14 @@
sg_id = group_create_body['security_group']['id']
direction = 'ingress'
- protocol = 'icmp'
+ # The Neutron API accepts 'icmp', 'icmpv6' and 'ipv6-icmp' for
+ # IPv6 ICMP protocol names, but the latter is preferred and the
+ # others considered "legacy". Use 'ipv6-icmp' as the API could
+ # change to return only that value, see
+ # https://review.opendev.org/#/c/453346/
+ # The neutron-tempest-plugin API tests pass all three and verify
+ # the output, so there is no need to duplicate that here.
+ protocol = 'ipv6-icmp' if self._ip_version == 6 else 'icmp'
icmp_type_codes = [(3, 2), (3, 0), (8, 0), (0, 0), (11, None)]
for icmp_type, icmp_code in icmp_type_codes:
self._create_verify_security_group_rule(sg_id, direction,
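The protocol above is chosen per IP version, preferring the canonical 'ipv6-icmp' spelling for IPv6. The selection in isolation (the helper name is illustrative, not part of the test class)::

    def icmp_protocol_for(ip_version):
        """Return the preferred security group rule protocol name.

        Neutron accepts 'icmp', 'icmpv6' and 'ipv6-icmp' for IPv6, but
        'ipv6-icmp' is the preferred, canonical spelling.
        """
        return 'ipv6-icmp' if ip_version == 6 else 'icmp'


    assert icmp_protocol_for(4) == 'icmp'
    assert icmp_protocol_for(6) == 'ipv6-icmp'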
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index 85f6896..2b9719a 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -103,9 +103,10 @@
List tags.
Remove a tag.
- v2.0 of the Neutron API is assumed. The tag-ext extension allows users to
- set tags on the following resources: subnets, ports, routers and
- subnetpools.
+ v2.0 of the Neutron API is assumed. The tag-ext or standard-attr-tag
+ extension allows users to set tags on the following resources: subnets,
+ ports, routers and subnetpools.
+ From the Stein release, tag-ext has been renamed to standard-attr-tag.
"""
# NOTE(felipemonteiro): The supported resource names are plural. Use
@@ -115,8 +116,12 @@
@classmethod
def skip_checks(cls):
super(TagsExtTest, cls).skip_checks()
- if not utils.is_extension_enabled('tag-ext', 'network'):
- msg = "tag-ext extension not enabled."
+ # Added condition to support backward compatibility since
+ # tag-ext has been renamed to standard-attr-tag
+ if not (utils.is_extension_enabled('tag-ext', 'network') or
+ utils.is_extension_enabled('standard-attr-tag', 'network')):
+ msg = ("neither tag-ext nor standard-attr-tag extensions "
+ "are enabled.")
raise cls.skipException(msg)
@classmethod
diff --git a/tempest/api/network/test_versions.py b/tempest/api/network/test_versions.py
index 2f01e50..020cb5c 100644
--- a/tempest/api/network/test_versions.py
+++ b/tempest/api/network/test_versions.py
@@ -29,7 +29,7 @@
"""
result = self.network_versions_client.list_versions()
- expected_versions = ('v2.0')
+ expected_versions = ('v2.0',)
expected_resources = ('id', 'links', 'status')
received_list = result.values()
@@ -38,3 +38,14 @@
for resource in expected_resources:
self.assertIn(resource, version)
self.assertIn(version['id'], expected_versions)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('e64b7216-3178-4263-967c-d389290988bf')
+ def test_show_api_v2_details(self):
+ """Test that GET /v2.0/ returns expected resources."""
+ current_version = 'v2.0'
+ expected_resources = ('subnet', 'network', 'port')
+ result = self.network_versions_client.show_version(current_version)
+ actual_resources = [r['name'] for r in result['resources']]
+ for resource in expected_resources:
+ self.assertIn(resource, actual_resources)
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index 765bc6d..e9ca0b1 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -38,9 +38,9 @@
def test_read_object_with_rights(self):
# attempt to read object using authorized user
# update X-Container-Read metadata ACL
- tenant_name = self.os_roles_operator_alt.credentials.tenant_name
- username = self.os_roles_operator_alt.credentials.username
- cont_headers = {'X-Container-Read': tenant_name + ':' + username}
+ tenant_id = self.os_roles_operator_alt.credentials.tenant_id
+ user_id = self.os_roles_operator_alt.credentials.user_id
+ cont_headers = {'X-Container-Read': tenant_id + ':' + user_id}
container_client = self.os_roles_operator.container_client
resp_meta, _ = (
container_client.create_update_or_delete_container_metadata(
@@ -66,9 +66,9 @@
def test_write_object_with_rights(self):
# attempt to write object using authorized user
# update X-Container-Write metadata ACL
- tenant_name = self.os_roles_operator_alt.credentials.tenant_name
- username = self.os_roles_operator_alt.credentials.username
- cont_headers = {'X-Container-Write': tenant_name + ':' + username}
+ tenant_id = self.os_roles_operator_alt.credentials.tenant_id
+ user_id = self.os_roles_operator_alt.credentials.user_id
+ cont_headers = {'X-Container-Write': tenant_id + ':' + user_id}
container_client = self.os_roles_operator.container_client
resp_meta, _ = (
container_client.create_update_or_delete_container_metadata(
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index 982c4a1..fcd9a7c 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -31,9 +31,10 @@
Quotas are set by adding meta values to the container,
and are validated when set:
- - X-Container-Meta-Quota-Bytes:
+
+ - X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- - X-Container-Meta-Quota-Count:
+ - X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index f61d9f8..1567e06 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -34,13 +34,12 @@
def setUp(self):
super(CrossdomainTest, self).setUp()
- # Turning http://.../v1/foobar into http://.../
- self.account_client.skip_path()
-
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
- resp, body = self.account_client.get("crossdomain.xml", {})
+ url = self.account_client._get_base_version_url() + "crossdomain.xml"
+ resp, body = self.account_client.raw_request(url, "GET")
+ self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index a186f9e..8e9e406 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -22,13 +22,12 @@
def setUp(self):
super(HealthcheckTest, self).setUp()
- # Turning http://.../v1/foobar into http://.../
- self.account_client.skip_path()
@decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
def test_get_healthcheck(self):
-
- resp, _ = self.account_client.get("healthcheck", {})
+ url = self.account_client._get_base_version_url() + "healthcheck"
+ resp, body = self.account_client.raw_request(url, "GET")
+ self.account_client._error_checker(resp, body)
# The target of the request is not any Swift resource. Therefore, the
# existence of response header is checked without a custom matcher.
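Both the crossdomain and healthcheck tests now build an unversioned URL from the client's _get_base_version_url() and send a raw_request, instead of mutating the shared client with skip_path(). Roughly, that means turning a versioned account endpoint back into its root; an approximation of that step (not the actual tempest.lib implementation)::

    from six.moves.urllib.parse import urlparse


    def get_base_version_url(endpoint):
        """Turn e.g. http://swift.example.com/v1/AUTH_abc into its root."""
        parsed = urlparse(endpoint)
        # Keep only scheme and network location; drop the version and
        # account path segments so /healthcheck and /crossdomain.xml
        # can be requested at the service root.
        return '%s://%s/' % (parsed.scheme, parsed.netloc)


    print(get_base_version_url('http://swift.example.com/v1/AUTH_abc123'))
    # -> http://swift.example.com/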
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index affed6b..1351704 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -21,17 +21,6 @@
class BackendsCapabilitiesAdminTestsJSON(base.BaseVolumeAdminTest):
- CAPABILITIES = ('namespace',
- 'vendor_name',
- 'volume_backend_name',
- 'pool_name',
- 'driver_version',
- 'storage_protocol',
- 'display_name',
- 'description',
- 'visibility',
- 'properties')
-
@classmethod
def resource_setup(cls):
super(BackendsCapabilitiesAdminTestsJSON, cls).resource_setup()
@@ -44,12 +33,8 @@
@decorators.idempotent_id('3750af44-5ea2-4cd4-bc3e-56e7e6caf854')
def test_get_capabilities_backend(self):
# Test backend properties
- backend = self.admin_capabilities_client.show_backend_capabilities(
- self.hosts[0])
-
- # Verify getting capabilities parameters from a backend
- for key in self.CAPABILITIES:
- self.assertIn(key, backend)
+ # Check response schema
+ self.admin_capabilities_client.show_backend_capabilities(self.hosts[0])
@decorators.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
def test_compare_volume_stats_values(self):
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index f695f51..c57766e 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -113,7 +113,8 @@
self._delete_group_snapshot(group_snapshot)
group_snapshots = self.group_snapshots_client.list_group_snapshots()[
'group_snapshots']
- self.assertEmpty(group_snapshots)
+ self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
@decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
def test_create_group_from_group_snapshot(self):
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 9907497..8048017 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -20,18 +20,6 @@
CONF = config.CONF
-MESSAGE_KEYS = [
- 'created_at',
- 'event_id',
- 'guaranteed_until',
- 'id',
- 'message_level',
- 'request_id',
- 'resource_type',
- 'resource_uuid',
- 'user_message',
- 'links']
-
class UserMessagesTest(base.BaseVolumeAdminTest):
_api_version = 3
@@ -66,18 +54,11 @@
message_id = self._create_user_message()
self.addCleanup(self.messages_client.delete_message, message_id)
- # show message
- message = self.messages_client.show_message(message_id)['message']
- for key in MESSAGE_KEYS:
- self.assertIn(key, message.keys(), 'Missing expected key %s' % key)
+ # show message, check response schema
+ self.messages_client.show_message(message_id)
- # list messages
- messages = self.messages_client.list_messages()['messages']
- self.assertIsInstance(messages, list)
- for message in messages:
- for key in MESSAGE_KEYS:
- self.assertIn(key, message.keys(),
- 'Missing expected key %s' % key)
+ # list messages, check response schema
+ self.messages_client.list_messages()
@decorators.idempotent_id('c6eb6901-cdcc-490f-b735-4fe251842aed')
def test_delete_message(self):
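The hand-rolled key checks dropped here and in the other admin volume tests are superseded by JSON-Schema validation done inside the service clients. The shape of such a check, with a deliberately simplified schema rather than the real one from tempest.lib, looks like this::

    import jsonschema

    # Simplified subset of a user-message schema, for illustration only.
    message_schema = {
        'type': 'object',
        'properties': {
            'id': {'type': 'string'},
            'event_id': {'type': 'string'},
            'message_level': {'type': 'string'},
        },
        'required': ['id', 'event_id', 'message_level'],
    }


    def validate_message(body):
        """Raise jsonschema.ValidationError if the body does not match."""
        jsonschema.validate(body, message_schema)


    validate_message({'id': 'abc', 'event_id': 'VOLUME_000002',
                      'message_level': 'ERROR'})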
diff --git a/tempest/api/volume/admin/test_volume_hosts.py b/tempest/api/volume/admin/test_volume_hosts.py
index 7e53ce8..83c27e1 100644
--- a/tempest/api/volume/admin/test_volume_hosts.py
+++ b/tempest/api/volume/admin/test_volume_hosts.py
@@ -26,13 +26,6 @@
"The count of volume hosts is < 2, "
"response of list hosts is: %s" % hosts)
- # Check elements in volume hosts list
- host_list_keys = ['service', 'host_name', 'last-update',
- 'zone', 'service-status', 'service-state']
- for host in hosts:
- for key in host_list_keys:
- self.assertIn(key, host)
-
@decorators.idempotent_id('21168d57-b373-4b71-a3ac-f2c88f0c5d31')
def test_show_host(self):
hosts = self.admin_hosts_client.list_hosts()['hosts']
@@ -53,12 +46,6 @@
"all hosts that found are: %s" % hosts)
# Check each cinder-volume host.
- host_detail_keys = ['project', 'volume_count', 'snapshot_count',
- 'host', 'total_volume_gb', 'total_snapshot_gb']
for host in c_vol_hosts:
host_details = self.admin_hosts_client.show_host(host)['host']
self.assertNotEmpty(host_details)
- for detail in host_details:
- self.assertIn('resource', detail)
- for key in host_detail_keys:
- self.assertIn(key, detail['resource'])
diff --git a/tempest/api/volume/admin/test_volume_pools.py b/tempest/api/volume/admin/test_volume_pools.py
index d389c26..744bc01 100644
--- a/tempest/api/volume/admin/test_volume_pools.py
+++ b/tempest/api/volume/admin/test_volume_pools.py
@@ -24,6 +24,7 @@
def _assert_pools(self, with_detail=False):
cinder_pools = self.admin_scheduler_stats_client.list_pools(
detail=with_detail)['pools']
+ self.assertNotEmpty(cinder_pools, "no cinder pools listed.")
self.assertIn('name', cinder_pools[0])
if with_detail:
self.assertIn(CONF.volume.vendor_name,
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index 75dca41..ee52354 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -44,12 +44,10 @@
@decorators.idempotent_id('abb9198e-67d0-4b09-859f-4f4a1418f176')
def test_show_default_quota(self):
+ # response body is validated by schema
default_quotas = self.admin_quota_classes_client.show_quota_class_set(
'default')['quota_class_set']
- self.assertIn('id', default_quotas)
self.assertEqual('default', default_quotas.pop('id'))
- for key in QUOTA_KEYS:
- self.assertIn(key, default_quotas)
@decorators.idempotent_id('a7644c63-2669-467a-b00e-452dd5c5397b')
def test_update_default_quota(self):
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 053a7d9..b073604 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -19,7 +19,6 @@
QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
'backup_gigabytes', 'per_volume_gigabytes']
-QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use']
class VolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest):
@@ -55,17 +54,13 @@
@decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0')
def test_list_quotas(self):
- quotas = (self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
- ['quota_set'])
- for key in QUOTA_KEYS:
- self.assertIn(key, quotas)
+ # Check response schema
+ self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
@decorators.idempotent_id('2be020a2-5fdd-423d-8d35-a7ffbc36e9f7')
def test_list_default_quotas(self):
- quotas = self.admin_quotas_client.show_default_quota_set(
- self.demo_tenant_id)['quota_set']
- for key in QUOTA_KEYS:
- self.assertIn(key, quotas)
+ # Check response schema
+ self.admin_quotas_client.show_default_quota_set(self.demo_tenant_id)
@decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6')
def test_update_all_quota_resources_for_tenant(self):
@@ -92,13 +87,9 @@
@decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed')
def test_show_quota_usage(self):
- quota_usage = self.admin_quotas_client.show_quota_set(
- self.os_admin.credentials.tenant_id,
- params={'usage': True})['quota_set']
- for key in QUOTA_KEYS:
- self.assertIn(key, quota_usage)
- for usage_key in QUOTA_USAGE_KEYS:
- self.assertIn(usage_key, quota_usage[key])
+ # Check response schema
+ self.admin_quotas_client.show_quota_set(
+ self.os_admin.credentials.tenant_id, params={'usage': True})
@decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
def test_delete_quota(self):
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 915aeec..5c7ab15 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -45,13 +45,6 @@
cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
cls.demo_tenant_id, **cleanup_quota_set)
- cls.shared_quota_set = {'gigabytes': 2 * CONF.volume.volume_size,
- 'volumes': 1}
-
- cls.admin_quotas_client.update_quota_set(
- cls.demo_tenant_id,
- **cls.shared_quota_set)
-
# NOTE(gfidente): no need to delete in tearDown as
# they are created using utility wrapper methods.
cls.volume = cls.create_volume()
@@ -59,6 +52,8 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('bf544854-d62a-47f2-a681-90f7a47d86b6')
def test_quota_volumes(self):
+ self.admin_quotas_client.update_quota_set(self.demo_tenant_id,
+ volumes=1, gigabytes=-1)
self.assertRaises(lib_exc.OverLimit,
self.volumes_client.create_volume,
size=CONF.volume.volume_size)
@@ -66,17 +61,18 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('2dc27eee-8659-4298-b900-169d71a91374')
def test_quota_volume_gigabytes(self):
- # NOTE(gfidente): quota set needs to be changed for this test
- # or we may be limited by the volumes or snaps quota number, not by
- # actual gigs usage; next line ensures shared set is restored.
- self.addCleanup(self.admin_quotas_client.update_quota_set,
- self.demo_tenant_id,
- **self.shared_quota_set)
- new_quota_set = {'gigabytes': CONF.volume.volume_size,
- 'volumes': 2, 'snapshots': 1}
self.admin_quotas_client.update_quota_set(
- self.demo_tenant_id,
- **new_quota_set)
+ self.demo_tenant_id, gigabytes=CONF.volume.volume_size, volumes=-1)
self.assertRaises(lib_exc.OverLimit,
self.volumes_client.create_volume,
- size=CONF.volume.volume_size)
+ size=CONF.volume.volume_size * 2)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('d321dc21-d8c6-401f-95fe-49f4845f1a6d')
+ def test_volume_extend_gigabytes_quota_deviation(self):
+ self.admin_quotas_client.update_quota_set(
+ self.demo_tenant_id, gigabytes=CONF.volume.volume_size)
+ self.assertRaises(lib_exc.OverLimit,
+ self.volumes_client.extend_volume,
+ self.volume['id'],
+ new_size=CONF.volume.volume_size * 2)
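Each negative quota test now sets only the limit it actually exercises and leaves the other resource unlimited (-1 is the quota value for unlimited), so the OverLimit error is guaranteed to come from the intended quota. A toy, self-contained model of that pattern (the class below is a stand-in, not a tempest client)::

    class OverLimit(Exception):
        pass


    class ToyQuotaBackend(object):
        def __init__(self):
            self.limits = {'volumes': -1, 'gigabytes': -1}  # -1 == unlimited
            self.used_gb = 0
            self.volume_count = 0

        def update_quota_set(self, **limits):
            self.limits.update(limits)

        def create_volume(self, size):
            gb_limit = self.limits['gigabytes']
            vol_limit = self.limits['volumes']
            if gb_limit != -1 and self.used_gb + size > gb_limit:
                raise OverLimit('gigabytes quota exceeded')
            if vol_limit != -1 and self.volume_count + 1 > vol_limit:
                raise OverLimit('volumes quota exceeded')
            self.used_gb += size
            self.volume_count += 1


    backend = ToyQuotaBackend()
    # Constrain only gigabytes; volumes stays unlimited, so the failure
    # below is unambiguously a gigabytes overrun.
    backend.update_quota_set(gigabytes=1, volumes=-1)
    try:
        backend.create_volume(size=2)
    except OverLimit as exc:
        print(exc)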
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 1c56eb2..18e0b9b 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -36,7 +36,7 @@
# process is finished.
fetched_list = self.admin_volume_client.list_volumes(
params={'all_tenants': True,
- 'display_name': vol['name']})['volumes']
+ 'name': vol['name']})['volumes']
for fetched_vol in fetched_list:
if fetched_vol['id'] != vol['id']:
@@ -94,7 +94,7 @@
super(VolumeRetypeTest, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
- raise cls.skipException("Cinder multi-backend feature disabled.")
+ raise cls.skipException("Cinder multi-backend feature disabled")
if len(set(CONF.volume.backend_names)) < 2:
raise cls.skipException("Requires at least two different "
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 1077524..ecc850e 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -92,15 +92,12 @@
'extra_specs': extra_specs,
'os-volume-type-access:is_public': True}
body = self.create_volume_type(**params)
- self.assertIn('name', body)
self.assertEqual(name, body['name'],
"The created volume_type name is not equal "
"to the requested name")
self.assertEqual(description, body['description'],
"The created volume_type_description name is "
"not equal to the requested name")
- self.assertIsNotNone(body['id'],
- "Field volume_type id is empty or not found.")
fetched_volume_type = self.admin_volume_types_client.show_volume_type(
body['id'])['volume_type']
self.assertEqual(name, fetched_volume_type['name'],
@@ -130,7 +127,6 @@
encryption_type = \
self.admin_encryption_types_client.create_encryption_type(
volume_type_id, **create_kwargs)['encryption']
- self.assertIn('volume_type_id', encryption_type)
for key in create_kwargs:
self.assertEqual(create_kwargs[key], encryption_type[key],
'The created encryption_type %s is different '
@@ -193,8 +189,13 @@
'is_public': is_public}
updated_vol_type = self.admin_volume_types_client.update_volume_type(
volume_type['id'], **kwargs)['volume_type']
-
- # Verify volume type details were updated
self.assertEqual(name, updated_vol_type['name'])
self.assertEqual(description, updated_vol_type['description'])
self.assertEqual(is_public, updated_vol_type['is_public'])
+
+ # Verify volume type details were updated
+ fetched_volume_type = self.admin_volume_types_client.show_volume_type(
+ volume_type['id'])['volume_type']
+ self.assertEqual(name, fetched_volume_type['name'])
+ self.assertEqual(description, fetched_volume_type['description'])
+ self.assertEqual(is_public, fetched_volume_type['is_public'])
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 3e0deef..5bac3d8 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -23,6 +23,7 @@
class VolumesActionsTest(base.BaseVolumeAdminTest):
+ create_default_network = True
def _create_reset_and_force_delete_temp_volume(self, status=None):
# Create volume, reset volume status, and force delete temp volume
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 64fe29a..bcbcf43 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -30,6 +30,9 @@
tempest.test.BaseTestCase):
"""Base test case class for all Cinder API tests."""
+ # Set this to True in subclasses to create a default network. See
+ # https://bugs.launchpad.net/tempest/+bug/1844568
+ create_default_network = False
_api_version = 2
# if api_v2 is not enabled while api_v3 is enabled, the volume v2 classes
# should be transferred to volume v3 classes.
@@ -63,7 +66,9 @@
@classmethod
def setup_credentials(cls):
- cls.set_network_resources()
+ cls.set_network_resources(
+ network=cls.create_default_network,
+ subnet=cls.create_default_network)
super(BaseVolumeTest, cls).setup_credentials()
@classmethod
@@ -124,6 +129,10 @@
name = data_utils.rand_name(cls.__name__ + '-Volume')
kwargs['name'] = name
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.delete_volume, cls.volumes_client,
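create_volume now defaults the availability zone to CONF.compute.compute_volume_common_az only when the option is set and the caller did not pass one, which is exactly what kwargs.setdefault provides. A standalone sketch of that pattern (FakeConf stands in for tempest's CONF)::

    class FakeConf(object):
        compute_volume_common_az = 'nova-common'


    CONF = FakeConf()


    def build_volume_kwargs(**kwargs):
        # setdefault only fills the key when the caller did not provide it.
        if CONF.compute_volume_common_az:
            kwargs.setdefault('availability_zone',
                              CONF.compute_volume_common_az)
        return kwargs


    print(build_volume_kwargs(size=1))
    # {'size': 1, 'availability_zone': 'nova-common'}
    print(build_volume_kwargs(size=1, availability_zone='az2'))
    # {'size': 1, 'availability_zone': 'az2'}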
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index b4d48db..1e5c9de 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -17,13 +17,28 @@
class VersionsTest(base.BaseVolumeTest):
+ """Test cinder versions"""
_api_version = 3
@decorators.idempotent_id('77838fc4-b49b-4c64-9533-166762517369')
@decorators.attr(type='smoke')
def test_list_versions(self):
+ """Test listing cinder versions"""
# NOTE: The version data is checked on service client side
# with JSON-Schema validation. It is enough to just call
# the API here.
self.versions_client.list_versions()
+
+ @decorators.idempotent_id('7f755ae2-caa9-4049-988c-331d8f7a579f')
+ def test_show_version(self):
+ """Test getting cinder version details"""
+ # NOTE: The version data is checked on service client side
+ # with JSON-Schema validation. So we will loop through each
+ # version and call show version.
+ versions = self.versions_client.list_versions()['versions']
+ for version_dict in versions:
+ version = version_dict['id']
+ major_version = version.split('.')[0]
+ response = self.versions_client.show_version(major_version)
+ self.assertEqual(version, response['versions'][0]['id'])
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index 00a3375..4d64a95 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -23,7 +23,7 @@
# NOTE(zhufl): This inherits from BaseVolumeAdminTest because
# it requires force_tenant_isolation=True, which need admin
# credentials to create non-admin users for the tests.
-class AbsoluteLimitsTests(base.BaseVolumeAdminTest): # noqa
+class AbsoluteLimitsTests(base.BaseVolumeAdminTest): # noqa: T115
# avoid existing volumes of pre-defined tenant
force_tenant_isolation = True
diff --git a/tempest/api/volume/test_volume_delete_cascade.py b/tempest/api/volume/test_volume_delete_cascade.py
index bb32c11..53f1bca 100644
--- a/tempest/api/volume/test_volume_delete_cascade.py
+++ b/tempest/api/volume/test_volume_delete_cascade.py
@@ -58,8 +58,11 @@
@decorators.idempotent_id('994e2d40-de37-46e8-b328-a58fba7e4a95')
def test_volume_delete_cascade(self):
- # The case validates the ability to delete a volume
- # with associated snapshots.
+ """Test deleting a volume with associated snapshots
+
+ The case validates the ability to delete a volume
+ with associated snapshots.
+ """
# Create a volume
volume = self.create_volume()
@@ -78,9 +81,12 @@
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'Skip because of Bug#1677525')
def test_volume_from_snapshot_cascade_delete(self):
- # The case validates the ability to delete a volume with
- # associated snapshot while there is another volume created
- # from that snapshot.
+ """Test deleting a volume with a snapshot used by another volume
+
+ The case validates the ability to delete a volume with
+ associated snapshot while there is another volume created
+ from that snapshot.
+ """
# Create a volume
volume = self.create_volume()
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index d203b2d..2151168 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -20,6 +20,7 @@
class VolumesMetadataTest(base.BaseVolumeTest):
+ """Test volume metadata"""
@classmethod
def resource_setup(cls):
@@ -34,6 +35,7 @@
@decorators.idempotent_id('6f5b125b-f664-44bf-910f-751591fe5769')
def test_crud_volume_metadata(self):
+ """Test creating, getting, updating and deleting of volume metadata"""
# Create metadata for the volume
metadata = {"key1": "value1",
"key2": "value2",
@@ -71,6 +73,7 @@
@decorators.idempotent_id('862261c5-8df4-475a-8c21-946e50e36a20')
def test_update_show_volume_metadata_item(self):
+ """Test updating and getting single volume metadata item"""
# Update metadata item for the volume
metadata = {"key1": "value1",
"key2": "value2",
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index c85e0bc..3eb81f5 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -20,6 +20,7 @@
class VolumesTransfersTest(base.BaseVolumeTest):
+ """Test volume transfer"""
credentials = ['primary', 'alt', 'admin']
@@ -34,6 +35,7 @@
@decorators.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
def test_create_get_list_accept_volume_transfer(self):
+ """Test creating, getting, listing and accepting of volume transfer"""
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
@@ -63,8 +65,6 @@
# Accept a volume transfer by alt_tenant
body = self.alt_client.accept_volume_transfer(
transfer_id, auth_key=auth_key)['transfer']
- for key in ['id', 'name', 'links', 'volume_id']:
- self.assertIn(key, body)
waiters.wait_for_volume_resource_status(self.alt_volumes_client,
volume['id'], 'available')
accepted_volume = self.alt_volumes_client.show_volume(
@@ -76,6 +76,7 @@
@decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
def test_create_list_delete_volume_transfer(self):
+ """Test creating, listing and deleting volume transfer"""
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
@@ -95,8 +96,6 @@
# elements, and look for the created transfer.
transfers = self.client.list_volume_transfers(detail=True)['transfers']
self.assertNotEmpty(transfers)
- for transfer in transfers:
- self.assertIn('created_at', transfer)
volume_list = [transfer['volume_id'] for transfer in transfers]
self.assertIn(volume['id'], volume_list,
'Transfer not found for volume %s' % volume['id'])
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index be5638e..9edffc6 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -25,6 +25,7 @@
class VolumesActionsTest(base.BaseVolumeTest):
+ create_default_network = True
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index ea39a21..eb54426 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -23,6 +23,7 @@
class VolumesCloneTest(base.BaseVolumeTest):
+ """Test volume clone"""
@classmethod
def skip_checks(cls):
@@ -44,6 +45,7 @@
@decorators.idempotent_id('9adae371-a257-43a5-9555-dc7c88e66e0e')
def test_create_from_volume(self):
+ """Test cloning a volume with increasing size"""
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size
@@ -58,6 +60,7 @@
@decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
@utils.services('image')
def test_create_from_bootable_volume(self):
+ """Test cloning a bootable volume"""
# Create volume from image
img_uuid = CONF.compute.image_ref
src_vol = self.create_volume(imageRef=img_uuid)
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index bba7a0b..4bfb166 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -22,6 +22,7 @@
class VolumesCloneNegativeTest(base.BaseVolumeTest):
+ """Negative tests of volume clone"""
@classmethod
def skip_checks(cls):
@@ -32,6 +33,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9adae371-a257-43a5-459a-dc7c88e66e0e')
def test_create_from_volume_decreasing_size(self):
+ """Test cloning a volume with decreasing size will fail"""
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size + 1
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index ac9a9c7..041823d 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -28,11 +28,13 @@
class VolumesExtendTest(base.BaseVolumeTest):
+ """Test volume extend"""
@decorators.idempotent_id('9a36df71-a257-43a5-9555-dc7c88e66e0e')
def test_volume_extend(self):
+ """Test extend a volume"""
# Extend Volume Test.
- volume = self.create_volume(image_ref=self.image_ref)
+ volume = self.create_volume(imageRef=self.image_ref)
extend_size = volume['size'] * 2
self.volumes_client.extend_volume(volume['id'],
new_size=extend_size)
@@ -45,6 +47,7 @@
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
"Cinder volume snapshots are disabled")
def test_volume_extend_when_volume_has_snapshot(self):
+ """Test extending a volume which has a snapshot"""
volume = self.create_volume()
self.create_snapshot(volume['id'])
@@ -60,6 +63,7 @@
class VolumesExtendAttachedTest(base.BaseVolumeTest):
"""Tests extending the size of an attached volume."""
+ create_default_network = True
# We need admin credentials for getting instance action event details. By
# default a non-admin can list and show instance actions if they own the
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 71db95c..ade2deb 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -27,6 +27,7 @@
class VolumesGetTest(base.BaseVolumeTest):
+ """Test getting volume info"""
def _volume_create_get_update_delete(self, **kwargs):
# Create a volume, Get it's details and Delete the volume
@@ -118,12 +119,14 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51')
def test_volume_create_get_update_delete(self):
+ """Test Create/Get/Update/Delete of a blank volume"""
self._volume_create_get_update_delete(size=CONF.volume.volume_size)
@decorators.attr(type='smoke')
@decorators.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
@utils.services('image')
def test_volume_create_get_update_delete_from_image(self):
+ """Test Create/Get/Update/Delete of a volume created from image"""
image = self.images_client.show_image(CONF.compute.image_ref)
min_disk = image['min_disk']
disk_size = max(min_disk, CONF.volume.volume_size)
@@ -134,12 +137,14 @@
@testtools.skipUnless(CONF.volume_feature_enabled.clone,
'Cinder volume clones are disabled')
def test_volume_create_get_update_delete_as_clone(self):
+ """Test Create/Get/Update/Delete of a cloned volume"""
origin = self.create_volume()
self._volume_create_get_update_delete(source_volid=origin['id'],
size=CONF.volume.volume_size)
class VolumesSummaryTest(base.BaseVolumeTest):
+ """Test volume summary"""
_api_version = 3
min_microversion = '3.12'
@@ -147,6 +152,7 @@
@decorators.idempotent_id('c4f2431e-4920-4736-9e00-4040386b6feb')
def test_show_volume_summary(self):
+ """Test showing volume summary"""
volume_summary = \
self.volumes_client.show_volume_summary()['volume-summary']
for key in ['total_size', 'total_count']:
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index d5358ab..2345698 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -17,7 +17,7 @@
import operator
import random
-from six.moves.urllib import parse
+from six.moves.urllib.parse import urlparse
from testtools import matchers
from tempest.api.volume import base
@@ -333,7 +333,19 @@
# If the current iteration is from a 'next' link, check that the
# absolute url is the same as the one used for this request
if next:
- self.assertEqual(next, response.response['content-location'])
+ curr = response.response['content-location']
+ currparsed = urlparse(curr)
+ nextparsed = urlparse(next)
+ # Depending on the environment, certain fields are omitted
+ # from the URL (e.g. the port). The fields to check are defined here.
+ fieldscheck = ['scheme', 'hostname', 'path', 'query', 'params',
+ 'fragment']
+ for field in fieldscheck:
+ self.assertEqual(getattr(currparsed, field),
+ getattr(nextparsed, field),
+ 'Incorrect link to next page. URLs do '
+ 'not match at %s:\n%s\n%s' % (field, curr,
+ next))
# Get next from response
next = None
@@ -352,7 +364,7 @@
# If we can follow to the next page, get params from url to make
# request in the form of a relative URL
if next:
- params = parse.urlparse(next).query
+ params = urlparse(next).query
# If cannot follow make sure it's because we have finished
else:
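The pagination check above compares the 'next' link and the returned content-location field by field because some deployments omit the port from one of the two URLs. The same comparison in isolation (a simplified helper, not the test's own code)::

    from six.moves.urllib.parse import urlparse

    FIELDS = ['scheme', 'hostname', 'path', 'query', 'params', 'fragment']


    def urls_equivalent(url_a, url_b):
        """Compare two URLs field by field, ignoring the port."""
        a, b = urlparse(url_a), urlparse(url_b)
        return all(getattr(a, f) == getattr(b, f) for f in FIELDS)


    assert urls_equivalent('http://cinder:8776/v3/volumes?marker=x',
                           'http://cinder/v3/volumes?marker=x')
    assert not urls_equivalent('http://cinder/v3/volumes?marker=x',
                               'http://cinder/v3/volumes?marker=y')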
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 1855386..bf221e8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -25,6 +25,7 @@
class VolumesSnapshotTestJSON(base.BaseVolumeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -44,7 +45,7 @@
server = self.create_server()
# NOTE(zhufl) Here we create volume from self.image_ref for adding
# coverage for "creating snapshot from non-blank volume".
- volume = self.create_volume(image_ref=self.image_ref)
+ volume = self.create_volume(imageRef=self.image_ref)
self.attach_volume(server['id'], volume['id'])
# Snapshot a volume which attached to an instance with force=False
diff --git a/tempest/api/volume/test_volumes_snapshots_list.py b/tempest/api/volume/test_volumes_snapshots_list.py
index 8a416ea..f4f039c 100644
--- a/tempest/api/volume/test_volumes_snapshots_list.py
+++ b/tempest/api/volume/test_volumes_snapshots_list.py
@@ -109,7 +109,7 @@
snap_list = self.snapshots_client.list_snapshots(
sort_key=sort_key, sort_dir=sort_dir)['snapshots']
self.assertNotEmpty(snap_list)
- if sort_key is 'display_name':
+ if sort_key == 'display_name':
sort_key = 'name'
# Note: On Cinder API, 'display_name' works as a sort key
# on a request, a volume name appears as 'name' on the response.
diff --git a/tempest/clients.py b/tempest/clients.py
index 204ce08..1db93a0 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,6 +44,7 @@
self._set_object_storage_clients()
self._set_image_clients()
self._set_network_clients()
+ self.placement_client = self.placement.PlacementClient()
# TODO(andreaf) This is maintained for backward compatibility
# with plugins, but it should removed eventually, since it was
# never a stable interface and it's not useful anyways
@@ -68,6 +69,9 @@
self.network_versions_client = self.network.NetworkVersionsClient()
self.service_providers_client = self.network.ServiceProvidersClient()
self.tags_client = self.network.TagsClient()
+ self.qos_client = self.network.QosClient()
+ self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
+ self.segments_client = self.network.SegmentsClient()
def _set_image_clients(self):
if CONF.service_available.glance:
@@ -82,10 +86,8 @@
self.schemas_client = self.image_v2.SchemasClient()
self.namespace_properties_client = \
self.image_v2.NamespacePropertiesClient()
- self.namespace_tags_client = \
- self.image_v2.NamespaceTagsClient()
- self.image_versions_client = \
- self.image_v2.VersionsClient()
+ self.namespace_tags_client = self.image_v2.NamespaceTagsClient()
+ self.image_versions_client = self.image_v2.VersionsClient()
def _set_compute_clients(self):
self.agents_client = self.compute.AgentsClient()
@@ -261,6 +263,8 @@
self.volume_v3.MessagesClient())
self.volume_versions_client_latest = (
self.volume_v3.VersionsClient())
+ self.attachments_client_latest = (
+ self.volume_v3.AttachmentsClient())
# TODO(gmann): Below alias for service clients have been
# deprecated and will be removed in future. Start using the alias
@@ -284,8 +288,7 @@
self.volume_v3.QuotaClassesClient()
self.volume_scheduler_stats_v2_client = \
self.volume_v3.SchedulerStatsClient()
- self.volume_transfers_v2_client = \
- self.volume_v3.TransfersClient()
+ self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
self.volume_v2_availability_zone_client = \
self.volume_v3.AvailabilityZoneClient()
self.volume_v2_limits_client = self.volume_v3.LimitsClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index e41aa86..b230615 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -46,7 +46,6 @@
Username ``--os-username`` OS_USERNAME
Password ``--os-password`` OS_PASSWORD
Project ``--os-project-name`` OS_PROJECT_NAME
-Tenant ``--os-tenant-name`` (depr.) OS_TENANT_NAME
Domain ``--os-domain-name`` OS_DOMAIN_NAME
======== ============================ ====================
@@ -75,9 +74,6 @@
* ``--os-project-name <auth-project-name>`` (Optional) Project to request
authorization on. Defaults to env[OS_PROJECT_NAME].
-* ``--os-tenant-name <auth-tenant-name>`` (Optional, deprecated) Tenant to
- request authorization on. Defaults to env[OS_TENANT_NAME].
-
* ``--os-domain-name <auth-domain-name>`` (Optional) Domain the user and
project belong to. Defaults to env[OS_DOMAIN_NAME].
@@ -100,7 +96,7 @@
To see help on specific argument, please do: ``tempest account-generator
[OPTIONS] <accounts_file.yaml> -h``.
"""
-import argparse
+
import os
import traceback
@@ -139,7 +135,7 @@
'dhcp': True}
admin_creds_dict = {'username': opts.os_username,
'password': opts.os_password}
- _project_name = opts.os_project_name or opts.os_tenant_name
+ _project_name = opts.os_project_name
if opts.identity_version == 3:
admin_creds_dict['project_name'] = _project_name
admin_creds_dict['domain_name'] = opts.os_domain_name or 'Default'
@@ -162,7 +158,6 @@
if CONF.service_available.swift:
spec.append([CONF.object_storage.operator_role])
spec.append([CONF.object_storage.reseller_admin_role])
- spec.append([CONF.object_storage.operator_role])
if admin:
spec.append('admin')
resources = []
@@ -222,10 +217,6 @@
metavar='<auth-project-name>',
default=os.environ.get('OS_PROJECT_NAME'),
help='Defaults to env[OS_PROJECT_NAME].')
- parser.add_argument('--os-tenant-name',
- metavar='<auth-tenant-name>',
- default=os.environ.get('OS_TENANT_NAME'),
- help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os-domain-name',
metavar='<auth-domain-name>',
default=os.environ.get('OS_DOMAIN_NAME'),
@@ -257,21 +248,6 @@
help='Output accounts yaml file')
-def get_options():
- usage_string = ('tempest account-generator [-h] <ARG> ...\n\n'
- 'To see help on specific argument, do:\n'
- 'tempest account-generator <ARG> -h')
- parser = argparse.ArgumentParser(
- description=DESCRIPTION,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- usage=usage_string
- )
-
- _parser_add_args(parser)
- opts = parser.parse_args()
- return opts
-
-
class TempestAccountGenerator(command.Command):
def get_parser(self, prog_name):
@@ -281,7 +257,19 @@
def take_action(self, parsed_args):
try:
- main(parsed_args)
+ if parsed_args.config_file:
+ config.CONF.set_config_path(parsed_args.config_file)
+ setup_logging()
+ resources = []
+ for count in range(parsed_args.concurrency):
+ # Use N different cred_providers to obtain different
+ # sets of creds
+ cred_provider = get_credential_provider(parsed_args)
+ resources.extend(generate_resources(cred_provider,
+ parsed_args.admin))
+ dump_accounts(resources, parsed_args.identity_version,
+ parsed_args.accounts)
+
except Exception:
LOG.exception("Failure generating test accounts.")
traceback.print_exc()
@@ -289,27 +277,3 @@
def get_description(self):
return DESCRIPTION
-
-
-def main(opts=None):
- setup_logging()
- if not opts:
- LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
- "please use: 'tempest account-generator'")
- opts = get_options()
- if opts.config_file:
- config.CONF.set_config_path(opts.config_file)
- if opts.os_tenant_name:
- LOG.warning("'os-tenant-name' and 'OS_TENANT_NAME' are both "
- "deprecated, please use 'os-project-name' or "
- "'OS_PROJECT_NAME' instead")
- resources = []
- for count in range(opts.concurrency):
- # Use N different cred_providers to obtain different sets of creds
- cred_provider = get_credential_provider(opts)
- resources.extend(generate_resources(cred_provider, opts.admin))
- dump_accounts(resources, opts.identity_version, opts.accounts)
-
-
-if __name__ == "__main__":
- main()
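With the deprecated standalone entry point and the ``--os-tenant-name`` option gone, account generation is driven entirely through the ``tempest`` CLI; for example ``tempest account-generator --os-username admin --os-password secret --os-project-name admin accounts.yaml`` (credential values are illustrative) writes the generated credentials to the positional accounts file.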
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 29abd49..0b96d9e 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright 2014 Dell Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -72,6 +70,15 @@
deleted unless the ``--delete-tempest-conf-objects`` flag is used to
force their deletion.
+.. note::
+
+   If a ``NotImplemented`` exception occurs during execution of
+   ``tempest cleanup``, it will not fail the run; the exception is only
+   logged. NotImplemented errors are ignored because they are an outcome
+   of some extensions being disabled, and ``tempest cleanup`` does not
+   check their availability as it tries to clean up as much as possible
+   without any complicated logic.
+
"""
import sys
import traceback
@@ -85,6 +92,7 @@
from tempest.common import credentials_factory as credentials
from tempest.common import identity
from tempest import config
+from tempest.lib import exceptions
SAVED_STATE_JSON = "saved_state.json"
DRY_RUN_JSON = "dry_run.json"
@@ -94,6 +102,8 @@
class TempestCleanup(command.Command):
+ GOT_EXCEPTIONS = []
+
def take_action(self, parsed_args):
try:
self.init(parsed_args)
@@ -103,8 +113,26 @@
LOG.exception("Failure during cleanup")
traceback.print_exc()
raise
+ # ignore NotImplemented errors as those are an outcome of some
+ # extensions being disabled and cleanup is not checking their
+ # availability as it tries to clean up as much as possible without
+ # any complicated logic
+ critical_exceptions = [ex for ex in self.GOT_EXCEPTIONS if
+ not isinstance(ex, exceptions.NotImplemented)]
+ if critical_exceptions:
+ raise Exception(self.GOT_EXCEPTIONS)
def init(self, parsed_args):
+ # set new handler for logging to stdout, by default only INFO messages
+ # are logged to stdout
+ stdout_handler = logging.logging.StreamHandler()
+ # debug argument is defined in cliff already
+ if self.app_args.debug:
+ stdout_handler.level = logging.DEBUG
+ else:
+ stdout_handler.level = logging.INFO
+ LOG.handlers.append(stdout_handler)
+
cleanup_service.init_conf()
self.options = parsed_args
self.admin_mgr = clients.Manager(
@@ -117,10 +145,11 @@
self.admin_project_id = ""
self._init_admin_ids()
- self.admin_role_added = []
-
# available services
- self.project_services = cleanup_service.get_project_cleanup_services()
+ self.project_associated_services = (
+ cleanup_service.get_project_associated_cleanup_services())
+ self.resource_cleanup_services = (
+ cleanup_service.get_resource_cleanup_services())
self.global_services = cleanup_service.get_global_cleanup_services()
if parsed_args.init_saved_state:
@@ -130,7 +159,7 @@
self._load_json()
def _cleanup(self):
- print("Begin cleanup")
+ LOG.info("Begin cleanup")
is_dry_run = self.options.dry_run
is_preserve = not self.options.delete_tempest_conf_objects
is_save_state = False
@@ -148,38 +177,35 @@
'is_save_state': is_save_state}
project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
projects = project_service.list()
- print("Process %s projects" % len(projects))
+ LOG.info("Processing %s projects", len(projects))
# Loop through list of projects and clean them up.
for project in projects:
- self._add_admin(project['id'])
self._clean_project(project)
kwargs = {'data': self.dry_run_data,
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
- 'is_save_state': is_save_state}
+ 'is_save_state': is_save_state,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
+ LOG.info("Processing global services")
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
+ LOG.info("Processing services")
+ for service in self.resource_cleanup_services:
+ svc = service(self.admin_mgr, **kwargs)
+ svc.run()
+
if is_dry_run:
with open(DRY_RUN_JSON, 'w+') as f:
f.write(json.dumps(self.dry_run_data, sort_keys=True,
indent=2, separators=(',', ': ')))
- self._remove_admin_user_roles()
-
- def _remove_admin_user_roles(self):
- project_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for projects: %s",
- project_ids)
- for project_id in project_ids:
- self._remove_admin_role(project_id)
-
def _clean_project(self, project):
- print("Cleaning project: %s " % project['name'])
+ LOG.debug("Cleaning project: %s ", project['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
is_preserve = not self.options.delete_tempest_conf_objects
@@ -190,19 +216,15 @@
project_data = dry_run_data["_projects_to_clean"][project_id] = {}
project_data['name'] = project_name
- kwargs = {"username": CONF.auth.admin_username,
- "password": CONF.auth.admin_password,
- "project_name": project['name']}
- mgr = clients.Manager(credentials=credentials.get_credentials(
- **kwargs))
kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
- 'saved_state_json': None,
+ 'saved_state_json': self.json_data,
'is_preserve': is_preserve,
'is_save_state': False,
- 'project_id': project_id}
- for service in self.project_services:
- svc = service(mgr, **kwargs)
+ 'project_id': project_id,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
+ for service in self.project_associated_services:
+ svc = service(self.admin_mgr, **kwargs)
svc.run()
def _init_admin_ids(self):
@@ -252,66 +274,35 @@
def get_description(self):
return 'Cleanup after tempest run'
- def _add_admin(self, project_id):
- rl_cl = self.admin_mgr.roles_v3_client
- needs_role = True
- roles = rl_cl.list_user_roles_on_project(project_id,
- self.admin_id)['roles']
- for role in roles:
- if role['id'] == self.admin_role_id:
- needs_role = False
- LOG.debug("User already had admin privilege for this project")
- if needs_role:
- LOG.debug("Adding admin privilege for : %s", project_id)
- rl_cl.create_user_role_on_project(project_id, self.admin_id,
- self.admin_role_id)
- self.admin_role_added.append(project_id)
-
- def _remove_admin_role(self, project_id):
- LOG.debug("Remove admin user role for projectt: %s", project_id)
- # Must initialize Admin Manager for each user role
- # Otherwise authentication exception is thrown, weird
- id_cl = clients.Manager(
- credentials.get_configured_admin_credentials()).identity_client
- if (self._project_exists(project_id)):
- try:
- id_cl.delete_role_from_user_on_project(project_id,
- self.admin_id,
- self.admin_role_id)
- except Exception as ex:
- LOG.exception("Failed removing role from project which still"
- "exists, exception: %s", ex)
-
- def _project_exists(self, project_id):
- pr_cl = self.admin_mgr.projects_client
- try:
- p = pr_cl.show_project(project_id)
- LOG.debug("Project is: %s", str(p))
- return True
- except Exception as ex:
- LOG.debug("Project no longer exists? %s", ex)
- return False
-
def _init_state(self):
- print("Initializing saved state.")
+ LOG.info("Initializing saved state.")
data = {}
admin_mgr = self.admin_mgr
kwargs = {'data': data,
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
- 'is_save_state': True}
+ 'is_save_state': True,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
- with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ for service in self.project_associated_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
- def _load_json(self):
+ for service in self.resource_cleanup_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
+
+ with open(SAVED_STATE_JSON, 'w+') as f:
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
+
+ def _load_json(self, saved_state_json=SAVED_STATE_JSON):
try:
- with open(SAVED_STATE_JSON) as json_file:
+ with open(saved_state_json, 'rb') as json_file:
self.json_data = json.load(json_file)
except IOError as ex:
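The failure handling added to ``take_action`` above can be summarised by the
following minimal sketch (the standalone helper and its name are assumptions
for illustration): only exceptions other than ``NotImplemented`` make the
cleanup run fail::

    from tempest.lib import exceptions

    def fail_on_critical(got_exceptions):
        # NotImplemented errors come from disabled extensions and have
        # already been logged by the cleanup services, so filter them out;
        # anything else aborts the run with the full exception list.
        critical = [ex for ex in got_exceptions
                    if not isinstance(ex, exceptions.NotImplemented)]
        if critical:
            raise Exception(got_exceptions)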
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1a08246..84d2492 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,6 +13,7 @@
# under the License.
from oslo_log import log as logging
+from six.moves.urllib import parse as urllib
from tempest import clients
from tempest.common import credentials_factory as credentials
@@ -22,8 +21,9 @@
from tempest.common import utils
from tempest.common.utils import net_info
from tempest import config
+from tempest.lib import exceptions
-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger('tempest.cmd.cleanup')
CONF = config.CONF
CONF_FLAVORS = None
@@ -101,7 +101,7 @@
self.tenant_filter = {}
if hasattr(self, 'tenant_id'):
- self.tenant_filter['tenant_id'] = self.tenant_id
+ self.tenant_filter['project_id'] = self.tenant_id
def _filter_by_tenant_id(self, item_list):
if (item_list is None or
@@ -127,12 +127,23 @@
pass
def run(self):
- if self.is_dry_run:
- self.dry_run()
- elif self.is_save_state:
- self.save_state()
- else:
- self.delete()
+ try:
+ if self.is_dry_run:
+ self.dry_run()
+ elif self.is_save_state:
+ self.save_state()
+ else:
+ self.delete()
+        except exceptions.NotImplemented as exc:
+            # Many OpenStack services implement features or resources via
+            # extensions. Tempest cleanup tries to clean up the test
+            # resources without much extension-checking logic, so a service
+            # will return a NotImplemented error when an extension it needs
+            # is missing.
+ msg = ("Got NotImplemented error in %s, full exception: %s" %
+ (str(self.__class__), str(exc)))
+ LOG.exception(msg)
+ self.got_exceptions.append(exc)
class SnapshotService(BaseService):
@@ -144,6 +155,10 @@
def list(self):
client = self.client
snaps = client.list_snapshots()['snapshots']
+ if not self.is_save_state:
+ # recreate list removing saved snapshots
+ snaps = [snap for snap in snaps if snap['id']
+ not in self.saved_state_json['snapshots'].keys()]
LOG.debug("List count, %s Snapshots", len(snaps))
return snaps
@@ -152,14 +167,21 @@
client = self.client
for snap in snaps:
try:
+ LOG.debug("Deleting Snapshot with id %s", snap['id'])
client.delete_snapshot(snap['id'])
except Exception:
- LOG.exception("Delete Snapshot exception.")
+ LOG.exception("Delete Snapshot %s exception.", snap['id'])
def dry_run(self):
snaps = self.list()
self.data['snapshots'] = snaps
+ def save_state(self):
+ snaps = self.list()
+ self.data['snapshots'] = {}
+ for snap in snaps:
+ self.data['snapshots'][snap['id']] = snap['name']
+
class ServerService(BaseService):
def __init__(self, manager, **kwargs):
@@ -171,6 +193,10 @@
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
+ if not self.is_save_state:
+ # recreate list removing saved servers
+ servers = [server for server in servers if server['id']
+ not in self.saved_state_json['servers'].keys()]
LOG.debug("List count, %s Servers", len(servers))
return servers
@@ -179,36 +205,54 @@
servers = self.list()
for server in servers:
try:
+ LOG.debug("Deleting Server with id %s", server['id'])
client.delete_server(server['id'])
except Exception:
- LOG.exception("Delete Server exception.")
+ LOG.exception("Delete Server %s exception.", server['id'])
def dry_run(self):
servers = self.list()
self.data['servers'] = servers
+ def save_state(self):
+ servers = self.list()
+ self.data['servers'] = {}
+ for server in servers:
+ self.data['servers'][server['id']] = server['name']
+
class ServerGroupService(ServerService):
def list(self):
client = self.server_groups_client
sgs = client.list_server_groups()['server_groups']
+ if not self.is_save_state:
+ # recreate list removing saved server_groups
+ sgs = [sg for sg in sgs if sg['id']
+ not in self.saved_state_json['server_groups'].keys()]
LOG.debug("List count, %s Server Groups", len(sgs))
return sgs
def delete(self):
- client = self.client
+ client = self.server_groups_client
sgs = self.list()
for sg in sgs:
try:
+ LOG.debug("Deleting Server Group with id %s", sg['id'])
client.delete_server_group(sg['id'])
except Exception:
- LOG.exception("Delete Server Group exception.")
+ LOG.exception("Delete Server Group %s exception.", sg['id'])
def dry_run(self):
sgs = self.list()
self.data['server_groups'] = sgs
+ def save_state(self):
+ sgs = self.list()
+ self.data['server_groups'] = {}
+ for sg in sgs:
+ self.data['server_groups'][sg['id']] = sg['name']
+
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
@@ -218,6 +262,11 @@
def list(self):
client = self.client
keypairs = client.list_keypairs()['keypairs']
+ if not self.is_save_state:
+ # recreate list removing saved keypairs
+ keypairs = [keypair for keypair in keypairs
+ if keypair['keypair']['name']
+ not in self.saved_state_json['keypairs'].keys()]
LOG.debug("List count, %s Keypairs", len(keypairs))
return keypairs
@@ -225,66 +274,23 @@
client = self.client
keypairs = self.list()
for k in keypairs:
+ name = k['keypair']['name']
try:
- name = k['keypair']['name']
+ LOG.debug("Deleting keypair %s", name)
client.delete_keypair(name)
except Exception:
- LOG.exception("Delete Keypairs exception.")
+ LOG.exception("Delete Keypair %s exception.", name)
def dry_run(self):
keypairs = self.list()
self.data['keypairs'] = keypairs
-
-class SecurityGroupService(BaseService):
- def __init__(self, manager, **kwargs):
- super(SecurityGroupService, self).__init__(kwargs)
- self.client = manager.compute_security_groups_client
-
- def list(self):
- client = self.client
- secgrps = client.list_security_groups()['security_groups']
- secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
- LOG.debug("List count, %s Security Groups", len(secgrp_del))
- return secgrp_del
-
- def delete(self):
- client = self.client
- secgrp_del = self.list()
- for g in secgrp_del:
- try:
- client.delete_security_group(g['id'])
- except Exception:
- LOG.exception("Delete Security Groups exception.")
-
- def dry_run(self):
- secgrp_del = self.list()
- self.data['security_groups'] = secgrp_del
-
-
-class FloatingIpService(BaseService):
- def __init__(self, manager, **kwargs):
- super(FloatingIpService, self).__init__(kwargs)
- self.client = manager.compute_floating_ips_client
-
- def list(self):
- client = self.client
- floating_ips = client.list_floating_ips()['floating_ips']
- LOG.debug("List count, %s Floating IPs", len(floating_ips))
- return floating_ips
-
- def delete(self):
- client = self.client
- floating_ips = self.list()
- for f in floating_ips:
- try:
- client.delete_floating_ip(f['id'])
- except Exception:
- LOG.exception("Delete Floating IPs exception.")
-
- def dry_run(self):
- floating_ips = self.list()
- self.data['floating_ips'] = floating_ips
+ def save_state(self):
+ keypairs = self.list()
+ self.data['keypairs'] = {}
+ for keypair in keypairs:
+ keypair = keypair['keypair']
+ self.data['keypairs'][keypair['name']] = keypair
class VolumeService(BaseService):
@@ -295,6 +301,10 @@
def list(self):
client = self.client
vols = client.list_volumes()['volumes']
+ if not self.is_save_state:
+ # recreate list removing saved volumes
+ vols = [vol for vol in vols if vol['id']
+ not in self.saved_state_json['volumes'].keys()]
LOG.debug("List count, %s Volumes", len(vols))
return vols
@@ -303,14 +313,21 @@
vols = self.list()
for v in vols:
try:
+ LOG.debug("Deleting volume with id %s", v['id'])
client.delete_volume(v['id'])
except Exception:
- LOG.exception("Delete Volume exception.")
+ LOG.exception("Delete Volume %s exception.", v['id'])
def dry_run(self):
vols = self.list()
self.data['volumes'] = vols
+ def save_state(self):
+ vols = self.list()
+ self.data['volumes'] = {}
+ for vol in vols:
+ self.data['volumes'][vol['id']] = vol['name']
+
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
@@ -320,13 +337,16 @@
def delete(self):
client = self.client
try:
- client.delete_quota_set(self.tenant_id)
+ LOG.debug("Deleting Volume Quotas for project with id %s",
+ self.project_id)
+ client.delete_quota_set(self.project_id)
except Exception:
- LOG.exception("Delete Volume Quotas exception.")
+ LOG.exception("Delete Volume Quotas exception for 'project %s'.",
+ self.project_id)
def dry_run(self):
quotas = self.client.show_quota_set(
- self.tenant_id, params={'usage': True})['quota_set']
+ self.project_id, params={'usage': True})['quota_set']
self.data['volume_quotas'] = quotas
@@ -339,9 +359,12 @@
def delete(self):
client = self.client
try:
- client.delete_quota_set(self.tenant_id)
+ LOG.debug("Deleting Nova Quotas for project with id %s",
+ self.project_id)
+ client.delete_quota_set(self.project_id)
except Exception:
- LOG.exception("Delete Quotas exception.")
+ LOG.exception("Delete Nova Quotas exception for 'project %s'.",
+ self.project_id)
def dry_run(self):
client = self.limits_client
@@ -349,10 +372,31 @@
self.data['compute_quotas'] = quotas['absolute']
-# Begin network service classes
-class NetworkService(BaseService):
+class NetworkQuotaService(BaseService):
def __init__(self, manager, **kwargs):
- super(NetworkService, self).__init__(kwargs)
+ super(NetworkQuotaService, self).__init__(kwargs)
+ self.client = manager.network_quotas_client
+
+ def delete(self):
+ client = self.client
+ try:
+ LOG.debug("Deleting Network Quotas for project with id %s",
+ self.project_id)
+ client.reset_quotas(self.project_id)
+ except Exception:
+ LOG.exception("Delete Network Quotas exception for 'project %s'.",
+ self.project_id)
+
+ def dry_run(self):
+ resp = [quota for quota in self.client.list_quotas()['quotas']
+ if quota['project_id'] == self.project_id]
+ self.data['network_quotas'] = resp
+
+
+# Begin network service classes
+class BaseNetworkService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(BaseNetworkService, self).__init__(kwargs)
self.networks_client = manager.networks_client
self.subnets_client = manager.subnets_client
self.ports_client = manager.ports_client
@@ -361,6 +405,7 @@
self.metering_label_rules_client = manager.metering_label_rules_client
self.security_groups_client = manager.security_groups_client
self.routers_client = manager.routers_client
+ self.subnetpools_client = manager.subnetpools_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
@@ -369,15 +414,23 @@
return [item for item in item_list if item['network_id']
not in CONF_NETWORKS]
+
+class NetworkService(BaseNetworkService):
+
def list(self):
client = self.networks_client
networks = client.list_networks(**self.tenant_filter)
networks = networks['networks']
+
+ if not self.is_save_state:
+ # recreate list removing saved networks
+ networks = [network for network in networks if network['id']
+ not in self.saved_state_json['networks'].keys()]
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
if network['id'] not in CONF_NETWORKS]
- LOG.debug("List count, %s Networks", networks)
+ LOG.debug("List count, %s Networks", len(networks))
return networks
def delete(self):
@@ -385,44 +438,70 @@
networks = self.list()
for n in networks:
try:
+ LOG.debug("Deleting Network with id %s", n['id'])
client.delete_network(n['id'])
except Exception:
- LOG.exception("Delete Network exception.")
+ LOG.exception("Delete Network %s exception.", n['id'])
def dry_run(self):
networks = self.list()
self.data['networks'] = networks
+ def save_state(self):
+ networks = self.list()
+ self.data['networks'] = {}
+ for network in networks:
+ self.data['networks'][network['id']] = network
-class NetworkFloatingIpService(NetworkService):
+
+class NetworkFloatingIpService(BaseNetworkService):
def list(self):
client = self.floating_ips_client
flips = client.list_floatingips(**self.tenant_filter)
flips = flips['floatingips']
+
+ if not self.is_save_state:
+ # recreate list removing saved flips
+ flips = [flip for flip in flips if flip['id']
+ not in self.saved_state_json['floatingips'].keys()]
LOG.debug("List count, %s Network Floating IPs", len(flips))
return flips
def delete(self):
- client = self.client
+ client = self.floating_ips_client
flips = self.list()
for flip in flips:
try:
+ LOG.debug("Deleting Network Floating IP with id %s",
+ flip['id'])
client.delete_floatingip(flip['id'])
except Exception:
- LOG.exception("Delete Network Floating IP exception.")
+ LOG.exception("Delete Network Floating IP %s exception.",
+ flip['id'])
def dry_run(self):
flips = self.list()
- self.data['floating_ips'] = flips
+ self.data['floatingips'] = flips
+
+ def save_state(self):
+ flips = self.list()
+ self.data['floatingips'] = {}
+ for flip in flips:
+ self.data['floatingips'][flip['id']] = flip
-class NetworkRouterService(NetworkService):
+class NetworkRouterService(BaseNetworkService):
def list(self):
client = self.routers_client
routers = client.list_routers(**self.tenant_filter)
routers = routers['routers']
+
+ if not self.is_save_state:
+ # recreate list removing saved routers
+ routers = [router for router in routers if router['id']
+ not in self.saved_state_json['routers'].keys()]
if self.is_preserve:
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
@@ -435,116 +514,33 @@
ports_client = self.ports_client
routers = self.list()
for router in routers:
- try:
- rid = router['id']
- ports = [port for port
- in ports_client.list_ports(device_id=rid)['ports']
- if net_info.is_router_interface_port(port)]
- for port in ports:
+ rid = router['id']
+ ports = [port for port
+ in ports_client.list_ports(device_id=rid)['ports']
+ if net_info.is_router_interface_port(port)]
+ for port in ports:
+ try:
+ LOG.debug("Deleting port with id %s of router with id %s",
+ port['id'], rid)
client.remove_router_interface(rid, port_id=port['id'])
+ except Exception:
+ LOG.exception("Delete Router Interface exception for "
+ "'port %s' of 'router %s'.", port['id'], rid)
+ try:
+ LOG.debug("Deleting Router with id %s", rid)
client.delete_router(rid)
except Exception:
- LOG.exception("Delete Router exception.")
+ LOG.exception("Delete Router %s exception.", rid)
def dry_run(self):
routers = self.list()
self.data['routers'] = routers
-
-class NetworkHealthMonitorService(NetworkService):
-
- def list(self):
- client = self.client
- hms = client.list_health_monitors()
- hms = hms['health_monitors']
- hms = self._filter_by_tenant_id(hms)
- LOG.debug("List count, %s Health Monitors", len(hms))
- return hms
-
- def delete(self):
- client = self.client
- hms = self.list()
- for hm in hms:
- try:
- client.delete_health_monitor(hm['id'])
- except Exception:
- LOG.exception("Delete Health Monitor exception.")
-
- def dry_run(self):
- hms = self.list()
- self.data['health_monitors'] = hms
-
-
-class NetworkMemberService(NetworkService):
-
- def list(self):
- client = self.client
- members = client.list_members()
- members = members['members']
- members = self._filter_by_tenant_id(members)
- LOG.debug("List count, %s Members", len(members))
- return members
-
- def delete(self):
- client = self.client
- members = self.list()
- for member in members:
- try:
- client.delete_member(member['id'])
- except Exception:
- LOG.exception("Delete Member exception.")
-
- def dry_run(self):
- members = self.list()
- self.data['members'] = members
-
-
-class NetworkVipService(NetworkService):
-
- def list(self):
- client = self.client
- vips = client.list_vips()
- vips = vips['vips']
- vips = self._filter_by_tenant_id(vips)
- LOG.debug("List count, %s VIPs", len(vips))
- return vips
-
- def delete(self):
- client = self.client
- vips = self.list()
- for vip in vips:
- try:
- client.delete_vip(vip['id'])
- except Exception:
- LOG.exception("Delete VIP exception.")
-
- def dry_run(self):
- vips = self.list()
- self.data['vips'] = vips
-
-
-class NetworkPoolService(NetworkService):
-
- def list(self):
- client = self.client
- pools = client.list_pools()
- pools = pools['pools']
- pools = self._filter_by_tenant_id(pools)
- LOG.debug("List count, %s Pools", len(pools))
- return pools
-
- def delete(self):
- client = self.client
- pools = self.list()
- for pool in pools:
- try:
- client.delete_pool(pool['id'])
- except Exception:
- LOG.exception("Delete Pool exception.")
-
- def dry_run(self):
- pools = self.list()
- self.data['pools'] = pools
+ def save_state(self):
+ routers = self.list()
+ self.data['routers'] = {}
+ for router in routers:
+ self.data['routers'][router['id']] = router['name']
class NetworkMeteringLabelRuleService(NetworkService):
@@ -554,6 +550,11 @@
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
+
+ if not self.is_save_state:
+ saved_rules = self.saved_state_json['metering_label_rules'].keys()
+ # recreate list removing saved rules
+ rules = [rule for rule in rules if rule['id'] not in saved_rules]
LOG.debug("List count, %s Metering Label Rules", len(rules))
return rules
@@ -562,22 +563,36 @@
rules = self.list()
for rule in rules:
try:
+ LOG.debug("Deleting Metering Label Rule with id %s",
+ rule['id'])
client.delete_metering_label_rule(rule['id'])
except Exception:
- LOG.exception("Delete Metering Label Rule exception.")
+ LOG.exception("Delete Metering Label Rule %s exception.",
+ rule['id'])
def dry_run(self):
rules = self.list()
- self.data['rules'] = rules
+ self.data['metering_label_rules'] = rules
+
+ def save_state(self):
+ rules = self.list()
+ self.data['metering_label_rules'] = {}
+ for rule in rules:
+ self.data['metering_label_rules'][rule['id']] = rule
-class NetworkMeteringLabelService(NetworkService):
+class NetworkMeteringLabelService(BaseNetworkService):
def list(self):
client = self.metering_labels_client
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
+
+ if not self.is_save_state:
+ # recreate list removing saved labels
+ labels = [label for label in labels if label['id']
+ not in self.saved_state_json['metering_labels'].keys()]
LOG.debug("List count, %s Metering Labels", len(labels))
return labels
@@ -586,16 +601,24 @@
labels = self.list()
for label in labels:
try:
+ LOG.debug("Deleting Metering Label with id %s", label['id'])
client.delete_metering_label(label['id'])
except Exception:
- LOG.exception("Delete Metering Label exception.")
+ LOG.exception("Delete Metering Label %s exception.",
+ label['id'])
def dry_run(self):
labels = self.list()
- self.data['labels'] = labels
+ self.data['metering_labels'] = labels
+
+ def save_state(self):
+ labels = self.list()
+ self.data['metering_labels'] = {}
+ for label in labels:
+ self.data['metering_labels'][label['id']] = label['name']
-class NetworkPortService(NetworkService):
+class NetworkPortService(BaseNetworkService):
def list(self):
client = self.ports_client
@@ -604,6 +627,10 @@
if port["device_owner"] == "" or
port["device_owner"].startswith("compute:")]
+ if not self.is_save_state:
+ # recreate list removing saved ports
+ ports = [port for port in ports if port['id']
+ not in self.saved_state_json['ports'].keys()]
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
@@ -615,16 +642,23 @@
ports = self.list()
for port in ports:
try:
+ LOG.debug("Deleting port with id %s", port['id'])
client.delete_port(port['id'])
except Exception:
- LOG.exception("Delete Port exception.")
+ LOG.exception("Delete Port %s exception.", port['id'])
def dry_run(self):
ports = self.list()
self.data['ports'] = ports
+ def save_state(self):
+ ports = self.list()
+ self.data['ports'] = {}
+ for port in ports:
+ self.data['ports'][port['id']] = port['name']
-class NetworkSecGroupService(NetworkService):
+
+class NetworkSecGroupService(BaseNetworkService):
def list(self):
client = self.security_groups_client
filter = self.tenant_filter
@@ -633,31 +667,50 @@
client.list_security_groups(**filter)['security_groups']
if secgroup['name'] != 'default']
+ if not self.is_save_state:
+ # recreate list removing saved security_groups
+ secgroups = [secgroup for secgroup in secgroups if secgroup['id']
+ not in self.saved_state_json['security_groups'].keys()
+ ]
if self.is_preserve:
- secgroups = self._filter_by_conf_networks(secgroups)
+ secgroups = [secgroup for secgroup in secgroups
+ if secgroup['security_group_rules'][0]['project_id']
+ not in CONF_PROJECTS]
LOG.debug("List count, %s security_groups", len(secgroups))
return secgroups
def delete(self):
- client = self.client
+ client = self.security_groups_client
secgroups = self.list()
for secgroup in secgroups:
try:
- client.delete_secgroup(secgroup['id'])
+ LOG.debug("Deleting security_group with id %s", secgroup['id'])
+ client.delete_security_group(secgroup['id'])
except Exception:
- LOG.exception("Delete security_group exception.")
+ LOG.exception("Delete security_group %s exception.",
+ secgroup['id'])
def dry_run(self):
secgroups = self.list()
- self.data['secgroups'] = secgroups
+ self.data['security_groups'] = secgroups
+
+ def save_state(self):
+ secgroups = self.list()
+ self.data['security_groups'] = {}
+ for secgroup in secgroups:
+ self.data['security_groups'][secgroup['id']] = secgroup['name']
-class NetworkSubnetService(NetworkService):
+class NetworkSubnetService(BaseNetworkService):
def list(self):
client = self.subnets_client
subnets = client.list_subnets(**self.tenant_filter)
subnets = subnets['subnets']
+ if not self.is_save_state:
+ # recreate list removing saved subnets
+ subnets = [subnet for subnet in subnets if subnet['id']
+ not in self.saved_state_json['subnets'].keys()]
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets", len(subnets))
@@ -668,16 +721,100 @@
subnets = self.list()
for subnet in subnets:
try:
+ LOG.debug("Deleting subnet with id %s", subnet['id'])
client.delete_subnet(subnet['id'])
except Exception:
- LOG.exception("Delete Subnet exception.")
+ LOG.exception("Delete Subnet %s exception.", subnet['id'])
def dry_run(self):
subnets = self.list()
self.data['subnets'] = subnets
+ def save_state(self):
+ subnets = self.list()
+ self.data['subnets'] = {}
+ for subnet in subnets:
+ self.data['subnets'][subnet['id']] = subnet['name']
+
+
+class NetworkSubnetPoolsService(BaseNetworkService):
+
+ def list(self):
+ client = self.subnetpools_client
+ pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
+ if not self.is_save_state:
+ # recreate list removing saved subnet pools
+ pools = [pool for pool in pools if pool['id']
+ not in self.saved_state_json['subnetpools'].keys()]
+ if self.is_preserve:
+ pools = [pool for pool in pools if pool['project_id']
+ not in CONF_PROJECTS]
+ LOG.debug("List count, %s Subnet Pools", len(pools))
+ return pools
+
+ def delete(self):
+ client = self.subnetpools_client
+ pools = self.list()
+ for pool in pools:
+ try:
+ LOG.debug("Deleting Subnet Pool with id %s", pool['id'])
+ client.delete_subnetpool(pool['id'])
+ except Exception:
+ LOG.exception("Delete Subnet Pool %s exception.", pool['id'])
+
+ def dry_run(self):
+ pools = self.list()
+ self.data['subnetpools'] = pools
+
+ def save_state(self):
+ pools = self.list()
+ self.data['subnetpools'] = {}
+ for pool in pools:
+ self.data['subnetpools'][pool['id']] = pool['name']
+
# begin global services
+class RegionService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(RegionService, self).__init__(kwargs)
+ self.client = manager.regions_client
+
+ def list(self):
+ client = self.client
+ regions = client.list_regions()
+ if not self.is_save_state:
+ regions = [region for region in regions['regions'] if region['id']
+ not in self.saved_state_json['regions'].keys()]
+ LOG.debug("List count, %s Regions", len(regions))
+ return regions
+ else:
+ LOG.debug("List count, %s Regions", len(regions['regions']))
+ return regions['regions']
+
+ def delete(self):
+ client = self.client
+ regions = self.list()
+ for region in regions:
+ try:
+ LOG.debug("Deleting region with id %s", region['id'])
+ client.delete_region(region['id'])
+ except Exception:
+ LOG.exception("Delete Region %s exception.", region['id'])
+
+ def dry_run(self):
+ regions = self.list()
+ self.data['regions'] = {}
+ for region in regions:
+ self.data['regions'][region['id']] = region
+
+ def save_state(self):
+ regions = self.list()
+ self.data['regions'] = {}
+ for region in regions:
+ self.data['regions'][region['id']] = region
+
+
class FlavorService(BaseService):
def __init__(self, manager, **kwargs):
super(FlavorService, self).__init__(kwargs)
@@ -702,9 +839,10 @@
flavors = self.list()
for flavor in flavors:
try:
+ LOG.debug("Deleting flavor with id %s", flavor['id'])
client.delete_flavor(flavor['id'])
except Exception:
- LOG.exception("Delete Flavor exception.")
+ LOG.exception("Delete Flavor %s exception.", flavor['id'])
def dry_run(self):
flavors = self.list()
@@ -724,7 +862,15 @@
def list(self):
client = self.client
- images = client.list_images(params={"all_tenants": True})['images']
+ response = client.list_images()
+ images = []
+ images.extend(response['images'])
+ while 'next' in response:
+ parsed = urllib.urlparse(response['next'])
+ marker = urllib.parse_qs(parsed.query)['marker'][0]
+ response = client.list_images(params={"marker": marker})
+ images.extend(response['images'])
+
if not self.is_save_state:
images = [image for image in images if image['id']
not in self.saved_state_json['images'].keys()]
@@ -739,9 +885,10 @@
images = self.list()
for image in images:
try:
+ LOG.debug("Deleting image with id %s", image['id'])
client.delete_image(image['id'])
except Exception:
- LOG.exception("Delete Image exception.")
+ LOG.exception("Delete Image %s exception.", image['id'])
def dry_run(self):
images = self.list()
@@ -754,12 +901,6 @@
self.data['images'][image['id']] = image['name']
-class IdentityService(BaseService):
- def __init__(self, manager, **kwargs):
- super(IdentityService, self).__init__(kwargs)
- self.client = manager.identity_v3_client
-
-
class UserService(BaseService):
def __init__(self, manager, **kwargs):
@@ -788,9 +929,10 @@
users = self.list()
for user in users:
try:
+ LOG.debug("Deleting user with id %s", user['id'])
self.client.delete_user(user['id'])
except Exception:
- LOG.exception("Delete User exception.")
+ LOG.exception("Delete User %s exception.", user['id'])
def dry_run(self):
users = self.list()
@@ -807,7 +949,7 @@
def __init__(self, manager, **kwargs):
super(RoleService, self).__init__(kwargs)
- self.client = manager.roles_client
+ self.client = manager.roles_v3_client
def list(self):
try:
@@ -828,9 +970,10 @@
roles = self.list()
for role in roles:
try:
+ LOG.debug("Deleting role with id %s", role['id'])
self.client.delete_role(role['id'])
except Exception:
- LOG.exception("Delete Role exception.")
+ LOG.exception("Delete Role %s exception.", role['id'])
def dry_run(self):
roles = self.list()
@@ -870,9 +1013,10 @@
projects = self.list()
for project in projects:
try:
+ LOG.debug("Deleting project with id %s", project['id'])
self.client.delete_project(project['id'])
except Exception:
- LOG.exception("Delete project exception.")
+ LOG.exception("Delete project %s exception.", project['id'])
def dry_run(self):
projects = self.list()
@@ -906,10 +1050,11 @@
domains = self.list()
for domain in domains:
try:
+ LOG.debug("Deleting domain with id %s", domain['id'])
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
except Exception:
- LOG.exception("Delete Domain exception.")
+ LOG.exception("Delete Domain %s exception.", domain['id'])
def dry_run(self):
domains = self.list()
@@ -922,33 +1067,54 @@
self.data['domains'][domain['id']] = domain['name']
-def get_project_cleanup_services():
- project_services = []
+def get_project_associated_cleanup_services():
+ """Returns list of project service classes.
+
+    The list contains services whose resources need to be deleted before
+    the project they are associated with is deleted; most likely these
+    resources cannot be deleted once the project itself is gone.
+ """
+ project_associated_services = []
# TODO(gmann): Tempest should provide some plugin hook for cleanup
# script extension to plugin tests also.
if IS_NOVA:
- project_services.append(ServerService)
- project_services.append(KeyPairService)
- project_services.append(SecurityGroupService)
- project_services.append(ServerGroupService)
- if not IS_NEUTRON:
- project_services.append(FloatingIpService)
- project_services.append(NovaQuotaService)
- if IS_NEUTRON:
- project_services.append(NetworkFloatingIpService)
- if utils.is_extension_enabled('metering', 'network'):
- project_services.append(NetworkMeteringLabelRuleService)
- project_services.append(NetworkMeteringLabelService)
- project_services.append(NetworkRouterService)
- project_services.append(NetworkPortService)
- project_services.append(NetworkSubnetService)
- project_services.append(NetworkService)
- project_services.append(NetworkSecGroupService)
+ project_associated_services.append(NovaQuotaService)
if IS_CINDER:
- project_services.append(SnapshotService)
- project_services.append(VolumeService)
- project_services.append(VolumeQuotaService)
- return project_services
+ project_associated_services.append(VolumeQuotaService)
+ if IS_NEUTRON:
+ project_associated_services.append(NetworkQuotaService)
+ return project_associated_services
+
+
+def get_resource_cleanup_services():
+ """Returns list of project related classes.
+
+    The list contains services whose resources are associated with a
+    project but can still be deleted after the project itself has been
+    deleted.
+ """
+ resource_cleanup_services = []
+ # TODO(gmann): Tempest should provide some plugin hook for cleanup
+ # script extension to plugin tests also.
+ if IS_NOVA:
+ resource_cleanup_services.append(ServerService)
+ resource_cleanup_services.append(KeyPairService)
+ resource_cleanup_services.append(ServerGroupService)
+ if IS_NEUTRON:
+ resource_cleanup_services.append(NetworkFloatingIpService)
+ if utils.is_extension_enabled('metering', 'network'):
+ resource_cleanup_services.append(NetworkMeteringLabelRuleService)
+ resource_cleanup_services.append(NetworkMeteringLabelService)
+ resource_cleanup_services.append(NetworkRouterService)
+ resource_cleanup_services.append(NetworkPortService)
+ resource_cleanup_services.append(NetworkSubnetService)
+ resource_cleanup_services.append(NetworkService)
+ resource_cleanup_services.append(NetworkSecGroupService)
+ resource_cleanup_services.append(NetworkSubnetPoolsService)
+ if IS_CINDER:
+ resource_cleanup_services.append(SnapshotService)
+ resource_cleanup_services.append(VolumeService)
+ return resource_cleanup_services
def get_global_cleanup_services():
@@ -961,4 +1127,5 @@
global_services.append(ProjectService)
global_services.append(DomainService)
global_services.append(RoleService)
+ global_services.append(RegionService)
return global_services
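Each service's ``list()`` above now applies the same saved-state filter; a
minimal sketch of the convention (the helper name is hypothetical, and most
services key on the resource id while keypairs key on the name)::

    def filter_out_saved(resources, saved_state_json, key):
        # When not building the saved state itself, skip anything recorded
        # by 'tempest cleanup --init-saved-state' so that only resources
        # created afterwards get deleted.
        saved_ids = saved_state_json.get(key, {}).keys()
        return [res for res in resources if res['id'] not in saved_ids]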
diff --git a/tempest/cmd/list_plugins.py b/tempest/cmd/list_plugins.py
index 86732da..51decc7 100644
--- a/tempest/cmd/list_plugins.py
+++ b/tempest/cmd/list_plugins.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 84c6d9a..d82b6df 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -19,11 +19,11 @@
==============
Tempest run has several options:
- * **--regex/-r**: This is a selection regex like what stestr uses. It will run
- any tests that match on re.match() with the regex
- * **--smoke/-s**: Run all the tests tagged as smoke
- * **--black-regex**: It allows to do simple test exclusion via passing a
- rejection/black regexp
+* ``--regex/-r``: This is a selection regex like what stestr uses. It will run
+ any tests that match on re.match() with the regex
+* ``--smoke/-s``: Run all the tests tagged as smoke
+* ``--black-regex``: It allows to do simple test exclusion via passing a
+ rejection/black regexp
There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
let you pass a filepath to tempest run with the file format being a line
@@ -47,6 +47,42 @@
by removing unnecessary tests from a list file which is generated from
``--list-tests`` option.
+You can also use the ``--worker-file`` option to pass a filepath to a
+worker yaml file, allowing you to manually schedule the test run.
+For example, you can set up a tempest run with
+different concurrencies to be used with different regexes.
+An example of a worker file is shown below::
+
+ # YAML Worker file
+ - worker:
+ # you can have more than one regex per worker
+ - tempest.api.*
+ - neutron_tempest_tests
+ - worker:
+ - tempest.scenario.*
+
+This will run tests matching 'tempest.api.*' and 'neutron_tempest_tests'
+under worker 1, and tests matching 'tempest.scenario.*' under worker 2.
+
+You can mix manual scheduling with the standard scheduling mechanisms by
+setting the concurrency field on a worker. For example::
+
+ # YAML Worker file
+ - worker:
+ # you can have more than one regex per worker
+ - tempest.api.*
+ - neutron_tempest_tests
+ concurrency: 3
+ - worker:
+ - tempest.scenario.*
+ concurrency: 2
+
+This will run the tests matching 'tempest.scenario.*' across 2 workers.
+
+This worker file is passed into stestr. For some more details on how it
+operates please refer to the stestr scheduling docs:
+https://stestr.readthedocs.io/en/stable/MANUAL.html#test-scheduling
+
Test Execution
==============
There are several options to control how the tests are executed. By default
@@ -103,6 +139,9 @@
from tempest.common import credentials_factory as credentials
from tempest import config
+if six.PY2:
+    # Python 2 does not have the FileNotFoundError exception
+ FileNotFoundError = IOError
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
@@ -112,7 +151,12 @@
def _set_env(self, config_file=None):
if config_file:
- CONF.set_config_path(os.path.abspath(config_file))
+ if os.path.exists(os.path.abspath(config_file)):
+ CONF.set_config_path(os.path.abspath(config_file))
+ else:
+ raise FileNotFoundError(
+ "Config file: %s doesn't exist" % config_file)
+
# NOTE(mtreinish): This is needed so that stestr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
@@ -177,6 +221,7 @@
blacklist_file=parsed_args.blacklist_file,
whitelist_file=parsed_args.whitelist_file,
black_regex=parsed_args.black_regex,
+ worker_path=parsed_args.worker_file,
load_list=parsed_args.load_list, combine=parsed_args.combine)
if return_code > 0:
sys.exit(return_code)
@@ -202,8 +247,8 @@
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
@@ -243,15 +288,20 @@
parser.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
'this file contains a separate test '
- 'on each newline. This command'
- 'supports files created by the tempest'
+ 'on each newline. This command '
+ 'supports files created by the tempest '
'run ``--list-tests`` command')
+ parser.add_argument('--worker-file', '--worker_file',
+ help='Optional path to a worker file. This file '
+ 'contains each worker configuration to be '
+ 'used to schedule the tests run')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
+ type=int, default=0,
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
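The config-file check added to ``_set_env`` relies on the small Python 2
compatibility shim above; a self-contained sketch of the same pattern (the
helper name is hypothetical)::

    import os

    import six

    if six.PY2:
        # Python 2 has no FileNotFoundError, so alias it to IOError
        FileNotFoundError = IOError

    def check_config_path(config_file):
        path = os.path.abspath(config_file)
        if not os.path.exists(path):
            raise FileNotFoundError(
                "Config file: %s doesn't exist" % config_file)
        return path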
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 8dcf575..e029538 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -78,16 +78,22 @@
import argparse
import collections
import io
-import json
import os
import re
import sys
+import traceback
+from cliff.command import Command
+from oslo_serialization import jsonutils as json
import subunit
import testtools
+DESCRIPTION = "Outputs all HTTP calls a given test made that were logged."
+
+
class UrlParser(testtools.TestResult):
+
uuid_re = re.compile(r'(^|[^0-9a-f])[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
'[0-9a-f]{4}-[0-9a-f]{12}([^0-9a-f]|$)')
id_re = re.compile(r'(^|[^0-9a-z])[0-9a-z]{8}[0-9a-z]{4}[0-9a-z]{4}'
@@ -241,33 +247,12 @@
class ArgumentParser(argparse.ArgumentParser):
+
def __init__(self):
- desc = "Outputs all HTTP calls a given test made that were logged."
+ desc = DESCRIPTION
super(ArgumentParser, self).__init__(description=desc)
-
self.prog = "subunit-describe-calls"
-
- self.add_argument(
- "-s", "--subunit", metavar="<subunit file>",
- nargs="?", type=argparse.FileType('rb'), default=sys.stdin,
- help="The path to the subunit output file.")
-
- self.add_argument(
- "-n", "--non-subunit-name", metavar="<non subunit name>",
- default="pythonlogging",
- help="The name used in subunit to describe the file contents.")
-
- self.add_argument(
- "-o", "--output-file", metavar="<output file>", default=None,
- help="The output file name for the json.")
-
- self.add_argument(
- "-p", "--ports", metavar="<ports file>", default=None,
- help="A JSON file describing the ports for each service.")
-
- self.add_argument(
- "-v", "--verbose", action='store_true', default=False,
- help="Add Request and Response header and body data to stdout.")
+ _parser_add_args(self)
def parse(stream, non_subunit_name, ports):
@@ -321,11 +306,63 @@
sys.stdout.write('\n')
-def entry_point():
- cl_args = ArgumentParser().parse_args()
+def entry_point(cl_args=None):
+ print('Running subunit_describe_calls ...')
+ if not cl_args:
+ print("Use of: 'subunit-describe-calls' is deprecated, "
+ "please use: 'tempest subunit-describe-calls'")
+ cl_args = ArgumentParser().parse_args()
parser = parse(cl_args.subunit, cl_args.non_subunit_name, cl_args.ports)
output(parser, cl_args.output_file, cl_args.verbose)
+def _parser_add_args(parser):
+ parser.add_argument(
+ "-s", "--subunit", metavar="<subunit file>",
+ nargs="?", type=argparse.FileType('rb'), default=sys.stdin,
+        help="Path to the subunit output file (default: stdin v1/v2 stream)"
+ )
+
+ parser.add_argument(
+ "-n", "--non-subunit-name", metavar="<non subunit name>",
+ default="pythonlogging",
+ help="The name used in subunit to describe the file contents."
+ )
+
+ parser.add_argument(
+ "-o", "--output-file", metavar="<output file>", default=None,
+ help="The output file name for the json."
+ )
+
+ parser.add_argument(
+ "-p", "--ports", metavar="<ports file>", default=None,
+ help="A JSON file describing the ports for each service."
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", action='store_true', default=False,
+ help="Add Request and Response header and body data to stdout."
+ )
+
+
+class TempestSubunitDescribeCalls(Command):
+
+ def get_parser(self, prog_name):
+ parser = super(TempestSubunitDescribeCalls, self).get_parser(prog_name)
+ _parser_add_args(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ try:
+ entry_point(parsed_args)
+
+ except Exception:
+ traceback.print_exc()
+ raise
+
+ def get_description(self):
+ return DESCRIPTION
+
+
if __name__ == "__main__":
entry_point()
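With the cliff command wired up, the tool can be invoked either way; for example ``tempest subunit-describe-calls -s testrepository.subunit -o calls.json`` (file names are illustrative) parses the named subunit stream and writes the per-test HTTP call summary to the JSON output file, while omitting ``-s`` keeps reading the stream from stdin.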
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 6c2fee8..8d5bdbd 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -365,11 +365,11 @@
catalog_type = getattr(cfg, 'catalog_type', None)
if not catalog_type:
continue
- else:
- if cfgname == 'identity':
- # Keystone is a required service for tempest
- continue
- if catalog_type not in services:
+ if cfgname == 'identity':
+ # Keystone is a required service for tempest
+ continue
+ if catalog_type not in services:
+ try:
if getattr(CONF.service_available, codename_match[cfgname]):
print('Endpoint type %s not found either disable service '
'%s or fix the catalog_type in the config file' % (
@@ -377,7 +377,13 @@
if update:
change_option(codename_match[cfgname],
'service_available', False)
- else:
+ except KeyError:
+ print('%s is a third party plugin, cannot be verified '
+ 'automatically, but it is suggested that it is set to '
+ 'False because %s service is not available ' % (
+ cfgname, catalog_type))
+ else:
+ try:
if not getattr(CONF.service_available,
codename_match[cfgname]):
print('Endpoint type %s is available, service %s should be'
@@ -391,6 +397,11 @@
avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
+ except KeyError:
+ print('%s is a third party plugin, cannot be verified '
+ 'automatically, but it is suggested that it is set to '
+ 'True because %s service is available ' % (
+ cfgname, catalog_type))
return avail_services
@@ -422,11 +433,6 @@
def main(opts=None):
- print('Running config verification...')
- if opts is None:
- print("Use of: 'verify-tempest-config' is deprecated, "
- "please use: 'tempest verify-config'")
- opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
@@ -486,7 +492,3 @@
LOG.exception("Failure verifying configuration.")
traceback.print_exc()
raise
-
-
-if __name__ == "__main__":
- main()
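The ``KeyError`` branches added above cover config sections registered by
third-party plugins, which have no entry in the service/codename mapping; a
minimal sketch of the pattern (the mapping contents and helper name shown
here are illustrative)::

    codename_match = {'compute': 'nova', 'image': 'glance'}

    def lookup_service_codename(cfgname):
        try:
            return codename_match[cfgname]
        except KeyError:
            # third-party plugin section: cannot be verified automatically
            return None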
diff --git a/tempest/cmd/workspace.py b/tempest/cmd/workspace.py
index 929a584..d0c4b28 100644
--- a/tempest/cmd/workspace.py
+++ b/tempest/cmd/workspace.py
@@ -86,6 +86,7 @@
def rename_workspace(self, old_name, new_name):
self._populate()
self._name_exists(old_name)
+ self._invalid_name_check(new_name)
self._workspace_name_exists(new_name)
self.workspaces[new_name] = self.workspaces.pop(old_name)
self._write_file()
@@ -93,7 +94,7 @@
@lockutils.synchronized('workspaces', external=True)
def move_workspace(self, name, path):
self._populate()
- path = os.path.abspath(os.path.expanduser(path))
+ path = os.path.abspath(os.path.expanduser(path)) if path else path
self._name_exists(name)
self._validate_path(path)
self.workspaces[name] = path
@@ -114,6 +115,7 @@
@lockutils.synchronized('workspaces', external=True)
def remove_workspace_directory(self, workspace_path):
+ self._validate_path(workspace_path)
shutil.rmtree(workspace_path)
@lockutils.synchronized('workspaces', external=True)
@@ -128,7 +130,17 @@
name))
sys.exit(1)
+ def _invalid_name_check(self, name):
+ if not name:
+ print("None or empty name is specified."
+ " Please specify correct name for workspace.")
+ sys.exit(1)
+
def _validate_path(self, path):
+ if not path:
+ print("None or empty path is specified for workspace."
+ " Please specify correct workspace path.")
+ sys.exit(1)
if not os.path.exists(path):
print("Path does not exist.")
sys.exit(1)
@@ -137,10 +149,11 @@
def register_new_workspace(self, name, path, init=False):
"""Adds the new workspace and writes out the new workspace config"""
self._populate()
- path = os.path.abspath(os.path.expanduser(path))
+ path = os.path.abspath(os.path.expanduser(path)) if path else path
# This only happens when register is called from outside of init
if not init:
self._validate_path(path)
+ self._invalid_name_check(name)
self._workspace_name_exists(name)
self.workspaces[name] = path
self._write_file()
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index f2730b3..edb9d16 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -44,15 +44,14 @@
def is_scheduler_filter_enabled(filter_name):
"""Check the list of enabled compute scheduler filters from config.
- This function checks whether the given compute scheduler filter is
- available and configured in the config file. If the
- scheduler_available_filters option is set to 'all' (Default value. which
- means default filters are configured in nova) in tempest.conf then, this
- function returns True with assumption that requested filter 'filter_name'
- is one of available filter in nova ("nova.scheduler.filters.all_filters").
+ This function checks whether the given compute scheduler filter is enabled
+ in the nova config file. If the scheduler_enabled_filters option is set to
+    'all' in tempest.conf, this function returns True with the assumption
+    that the requested filter 'filter_name' is one of the enabled filters
+    in nova ("nova.scheduler.filters.all_filters").
"""
- filters = CONF.compute_feature_enabled.scheduler_available_filters
+ filters = CONF.compute_feature_enabled.scheduler_enabled_filters
if not filters:
return False
if 'all' in filters:
@@ -79,23 +78,22 @@
:param wait_until: Server status to wait for the server to reach after
its creation.
:param volume_backed: Whether the server is volume backed or not.
- If this is true, a volume will be created and
- create server will be requested with
- 'block_device_mapping_v2' populated with below
- values:
- --------------------------------------------
- bd_map_v2 = [{
- 'uuid': volume['volume']['id'],
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': True}]
- kwargs['block_device_mapping_v2'] = bd_map_v2
- ---------------------------------------------
- If server needs to be booted from volume with other
- combination of bdm inputs than mentioned above, then
- pass the bdm inputs explicitly as kwargs and image_id
- as empty string ('').
+ If this is true, a volume will be created and create server will be
+ requested with 'block_device_mapping_v2' populated with below values:
+
+ .. code-block:: python
+
+ bd_map_v2 = [{
+ 'uuid': volume['volume']['id'],
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': True}]
+ kwargs['block_device_mapping_v2'] = bd_map_v2
+
+        If the server needs to be booted from a volume with a different
+        combination of bdm inputs than those above, then pass the bdm
+        inputs explicitly as kwargs and image_id as an empty string ('').
:param name: Name of the server to be provisioned. If not defined a random
string ending with '-instance' will be generated.
:param flavor: Flavor of the server to be provisioned. If not defined,
@@ -169,11 +167,23 @@
params = {'name': volume_name,
'imageRef': image_id,
'size': CONF.volume.volume_size}
+ if CONF.compute.compute_volume_common_az:
+ params.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = volumes_client.create_volume(**params)
- waiters.wait_for_volume_resource_status(volumes_client,
- volume['volume']['id'],
- 'available')
-
+ try:
+ waiters.wait_for_volume_resource_status(volumes_client,
+ volume['volume']['id'],
+ 'available')
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ try:
+ volumes_client.delete_volume(volume['volume']['id'])
+ volumes_client.wait_for_resource_deletion(
+ volume['volume']['id'])
+ except Exception as exc:
+ LOG.exception("Deleting volume %s failed, exception %s",
+ volume['volume']['id'], exc)
bd_map_v2 = [{
'uuid': volume['volume']['id'],
'source_type': 'volume',
@@ -186,6 +196,9 @@
# to be specified.
image_id = ''
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
body = clients.servers_client.create_server(name=name, imageRef=image_id,
flavorRef=flavor,
**kwargs)
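
For the explicit-bdm path described in the docstring above, a minimal sketch, assuming a client manager and a pre-created bootable volume id already exist in the surrounding test:

from tempest.common import compute

# Boot from an existing bootable volume by passing the block device mapping
# explicitly and an empty image_id ('bootable_volume_id' and 'clients' are
# assumed to come from the test).
bd_map_v2 = [{
    'uuid': bootable_volume_id,
    'source_type': 'volume',
    'destination_type': 'volume',
    'boot_index': 0,
    'delete_on_termination': True}]

body, servers = compute.create_test_server(
    clients, image_id='', wait_until='ACTIVE',
    block_device_mapping_v2=bd_map_v2)
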
@@ -385,8 +398,26 @@
def _upgrade(self, url):
"""Upgrade the HTTP connection to a WebSocket and verify."""
- # The real request goes to the /websockify URI always
- reqdata = 'GET /websockify HTTP/1.1\r\n'
+ # It is possible to pass the path as a query parameter in the request,
+ # so use it if present
+ # Given noVNC format
+ # https://x.com/vnc_auto.html?path=%3Ftoken%3Dxxx,
+ # url format is
+ # ParseResult(scheme='https', netloc='x.com',
+ # path='/vnc_auto.html', params='',
+ # query='path=%3Ftoken%3Dxxx', fragment='').
+ # qparams format is {'path': ['?token=xxx']}
+ qparams = urlparse.parse_qs(url.query)
+        # According to the references
+        # https://docs.python.org/3/library/urllib.parse.html
+        # https://tools.ietf.org/html/rfc3986#section-3.4
+        # qparams['path'][0] format is '?token=xxx' without a / prefix, so
+        # the leading / is left off the 'websockify' default to match.
+ path = qparams['path'][0] if 'path' in qparams else 'websockify'
+ # Fix websocket request format by adding / prefix.
+ # Updated request format: GET /?token=xxx HTTP/1.1
+ # or GET /websockify HTTP/1.1
+ reqdata = 'GET /%s HTTP/1.1\r\n' % path
reqdata += 'Host: %s' % url.hostname
# Add port only if we have one specified
if url.port:
@@ -395,7 +426,7 @@
reqdata += '\r\n'
# Tell the HTTP Server to Upgrade the connection to a WebSocket
reqdata += 'Upgrade: websocket\r\nConnection: Upgrade\r\n'
- # The token=xxx is sent as a Cookie not in the URI
+ # The token=xxx is sent as a Cookie not in the URI for noVNC < v1.1.0
reqdata += 'Cookie: %s\r\n' % url.query
# Use a hard-coded WebSocket key since a test program
reqdata += 'Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n'
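
The query-string handling commented above can be reproduced standalone with just the standard library (URL and token value are invented):

from urllib import parse as urlparse

url = urlparse.urlparse('https://x.com/vnc_auto.html?path=%3Ftoken%3Dxxx')
qparams = urlparse.parse_qs(url.query)   # {'path': ['?token=xxx']}
path = qparams['path'][0] if 'path' in qparams else 'websockify'
# The leading '/' is added back when the request line is built:
print('GET /%s HTTP/1.1' % path)         # GET /?token=xxx HTTP/1.1
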
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index eaf651b..cd6d058 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -26,7 +26,7 @@
if project['name'] == project_name:
return project
raise lib_exc.NotFound('No such project(%s) in %s' % (project_name,
- projects))
+ projects))
def get_tenant_by_name(client, tenant_name):
@@ -64,7 +64,8 @@
should not be used for testing identity features.
:param clients: a client manager.
- :return
+    :return: a v2 or v3 CredsClient instance
+ :rtype: V2CredsClient or V3CredsClient
"""
if CONF.identity.auth_version == 'v2':
client = clients.identity_client
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 52ccfa9..b68a879 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -73,6 +73,13 @@
msg = "'TYPE' column is required but the output doesn't have it: "
raise tempest.lib.exceptions.TempestException(msg + output)
+ def list_disks(self):
+ disks_list = self.get_disks()
+ disks_list = [line[0] for line in
+ [device_name.split()
+ for device_name in disks_list.splitlines()][1:]]
+ return disks_list
+
def get_boot_time(self):
cmd = 'cut -f1 -d. /proc/uptime'
boot_secs = self.exec_command(cmd)
@@ -98,6 +105,7 @@
def get_nic_name_by_ip(self, address):
cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
nic = self.exec_command(cmd)
+ LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
return nic.strip().strip(":").split('@')[0].lower()
def get_dns_servers(self):
@@ -147,7 +155,7 @@
self.exec_command('sudo umount %s' % mount_path)
def make_fs(self, dev_name, fs='ext4'):
- cmd_mkfs = 'sudo /usr/sbin/mke2fs -t %s /dev/%s' % (fs, dev_name)
+ cmd_mkfs = 'sudo mke2fs -t %s /dev/%s' % (fs, dev_name)
try:
self.exec_command(cmd_mkfs)
except tempest.lib.exceptions.SSHExecCommandFailed:
@@ -155,3 +163,53 @@
cmd_why = 'sudo ls -lR /dev'
LOG.info("Contents of /dev: %s", self.exec_command(cmd_why))
raise
+
+ def nc_listen_host(self, port=80, protocol='tcp'):
+ """Creates persistent nc server listening on the given TCP / UDP port
+        """Creates a persistent nc server listening on the given TCP/UDP port
+
+        :param port: the port to start listening on.
+        :param protocol: the protocol used by the server. TCP by default.
+ udp = '-u' if protocol.lower() == 'udp' else ''
+ cmd = "sudo nc %(udp)s -p %(port)s -lk -e echo foolish &" % {
+ 'udp': udp, 'port': port}
+ return self.exec_command(cmd)
+
+ def nc_host(self, host, port=80, protocol='tcp', expected_response=None):
+ """Check connectivity to TCP / UDP port at host via nc
+
+        :param host: an IP against which the connectivity will be tested.
+        :param port: the port to check connectivity against.
+        :param protocol: protocol used by nc to send packets. TCP by default.
+        :param expected_response: string representing the expected response
+            from the server.
+ :raises SSHExecCommandFailed: if an expected response is given and it
+ does not match the actual server response.
+ """
+ udp = '-u' if protocol.lower() == 'udp' else ''
+ cmd = 'echo "bar" | nc -w 1 %(udp)s %(host)s %(port)s' % {
+ 'udp': udp, 'host': host, 'port': port}
+ response = self.exec_command(cmd)
+
+        # Sending a UDP packet will always succeed, so we need to check
+        # the response to confirm connectivity.
+ if (expected_response is not None and
+ expected_response != response.strip()):
+ raise tempest.lib.exceptions.SSHExecCommandFailed(
+ command=cmd, exit_status=0, stdout=response, stderr='')
+ return response
+
+ def icmp_check(self, host, nic=None):
+        """Wrapper for icmp connectivity checks."""
+ return self.ping_host(host, nic=nic)
+
+ def udp_check(self, host, **kwargs):
+ """Wrapper for udp connectivity checks."""
+ kwargs.pop('nic', None)
+ return self.nc_host(host, protocol='udp', expected_response='foolish',
+ **kwargs)
+
+ def tcp_check(self, host, **kwargs):
+ """Wrapper for tcp connectivity checks."""
+ kwargs.pop('nic', None)
+ return self.nc_host(host, **kwargs)
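
A hedged usage sketch for the new connectivity helpers; the two RemoteClient instances and the target IP are assumed to come from an existing scenario test setup:

# Start persistent listeners on one guest, then verify reachability from
# another guest using the wrappers added above.
listener.nc_listen_host(port=8080, protocol='udp')
prober.udp_check(server_ip, port=8080)   # expects the 'foolish' banner back

listener.nc_listen_host(port=8081, protocol='tcp')
prober.tcp_check(server_ip, port=8081)

prober.icmp_check(server_ip)
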
diff --git a/tempest/common/utils/net_utils.py b/tempest/common/utils/net_utils.py
index 867b3dd..b697ef1 100644
--- a/tempest/common/utils/net_utils.py
+++ b/tempest/common/utils/net_utils.py
@@ -19,7 +19,6 @@
def get_unused_ip_addresses(ports_client, subnets_client,
network_id, subnet_id, count):
-
"""Return a list with the specified number of unused IP addresses
This method uses the given ports_client to find the specified number of
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 0e86f05..14790d6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -104,8 +104,8 @@
body = client.show_server(server_id)['server']
except lib_exc.NotFound:
return
- old_status = server_status = body['status']
- old_task_state = task_state = _get_task_state(body)
+ old_status = body['status']
+ old_task_state = _get_task_state(body)
start_time = int(time.time())
while True:
time.sleep(client.build_interval)
@@ -121,7 +121,15 @@
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if server_status == 'ERROR' and not ignore_error:
- raise lib_exc.DeleteErrorException(resource_id=server_id)
+ raise lib_exc.DeleteErrorException(
+ "Server %s failed to delete and is in ERROR status" %
+ server_id)
+ if server_status == 'SOFT_DELETED':
+ # Soft-deleted instances need to be forcibly deleted to
+ # prevent some test cases from failing.
+ LOG.debug("Automatically force-deleting soft-deleted server %s",
+ server_id)
+ client.force_delete_server(server_id)
if int(time.time()) - start_time >= client.build_timeout:
raise lib_exc.TimeoutException
@@ -202,6 +210,8 @@
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
raise exceptions.VolumeRestoreErrorException(volume_id=resource_id)
+ if resource_status == 'error_extending' and resource_status != status:
+ raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
@@ -213,6 +223,47 @@
resource_name, resource_id, status, time.time() - start)
+def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
+ """Waits for a volume attachment to be removed from a given volume."""
+ start = int(time.time())
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ while any(attachment_id == a['attachment_id'] for a in attachments):
+ time.sleep(client.build_interval)
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Failed to remove attachment %s from volume %s '
+ 'within the required time (%s s).' %
+ (attachment_id, volume_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ LOG.info('Attachment %s removed from volume %s after waiting for %f '
+ 'seconds', attachment_id, volume_id, time.time() - start)
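
A hedged usage sketch (clients and ids are assumed from test setup); the attachment id is read from the Cinder side, which is also where the waiter polls:

from tempest.common import waiters

# 'servers_client', 'volumes_client', 'server_id' and 'volume_id' are
# assumed to come from the surrounding test.
attachments = volumes_client.show_volume(volume_id)['volume']['attachments']
servers_client.detach_volume(server_id, volume_id)
waiters.wait_for_volume_attachment_remove(
    volumes_client, volume_id, attachments[0]['attachment_id'])
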
+
+
+def wait_for_volume_migration(client, volume_id, new_host):
+ """Waits for a Volume to move to a new host."""
+ body = client.show_volume(volume_id)['volume']
+ host = body['os-vol-host-attr:host']
+ migration_status = body['migration_status']
+ start = int(time.time())
+
+ # new_host is hostname@backend while current_host is hostname@backend#type
+ while migration_status != 'success' or new_host not in host:
+ time.sleep(client.build_interval)
+ body = client.show_volume(volume_id)['volume']
+ host = body['os-vol-host-attr:host']
+ migration_status = body['migration_status']
+
+ if migration_status == 'error':
+            message = ('Volume %s failed to migrate.' % (volume_id))
+ raise lib_exc.TempestException(message)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Volume %s failed to migrate to %s (current %s) '
+ 'within the required time (%s s).' %
+ (volume_id, new_host, host, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
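
A hedged usage sketch: after an admin-triggered migration has been started, a test would block on this waiter ('admin_volume_client', 'volume_id' and 'dest_host' are assumed to exist):

from tempest.common import waiters

# dest_host is in 'hostname@backend' form, as the comment above notes.
waiters.wait_for_volume_migration(admin_volume_client, volume_id, dest_host)
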
+
+
def wait_for_volume_retype(client, volume_id, new_volume_type):
"""Waits for a Volume to have a new volume type."""
body = client.show_volume(volume_id)['volume']
diff --git a/tempest/config.py b/tempest/config.py
index dbce504..204d977 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -20,6 +20,7 @@
from oslo_concurrency import lockutils
from oslo_config import cfg
+from oslo_config import types
from oslo_log import log as logging
from tempest.lib import exceptions
@@ -68,10 +69,7 @@
cfg.StrOpt('default_credentials_domain_name',
default='Default',
help="Default domain used when getting v3 credentials. "
- "This is the name keystone uses for v2 compatibility.",
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_isolation_domain_name',
- group='auth')]),
+ "This is the name keystone uses for v2 compatibility."),
cfg.BoolOpt('create_isolated_networks',
default=True,
help="If use_dynamic_credentials is set to True and Neutron "
@@ -83,27 +81,20 @@
cfg.StrOpt('admin_username',
help="Username for an administrative user. This is needed for "
"authenticating requests made by project isolation to "
- "create users and projects",
- deprecated_group='identity'),
+ "create users and projects"),
cfg.StrOpt('admin_project_name',
help="Project name to use for an administrative user. This is "
"needed for authenticating requests made by project "
- "isolation to create users and projects",
- deprecated_opts=[cfg.DeprecatedOpt('admin_tenant_name',
- group='auth'),
- cfg.DeprecatedOpt('admin_tenant_name',
- group='identity')]),
+ "isolation to create users and projects"),
cfg.StrOpt('admin_password',
help="Password to use for an administrative user. This is "
"needed for authenticating requests made by project "
"isolation to create users and projects",
- secret=True,
- deprecated_group='identity'),
+ secret=True),
cfg.StrOpt('admin_domain_name',
default='Default',
- help="Admin domain name for authentication (Keystone V3)."
- "The same domain applies to user and project",
- deprecated_group='identity'),
+ help="Admin domain name for authentication (Keystone V3). "
+ "The same domain applies to user and project"),
]
identity_group = cfg.OptGroup(name='identity',
@@ -145,9 +136,7 @@
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The public endpoint type to use for OpenStack Identity "
- "(Keystone) API v2",
- deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
- group='identity')]),
+ "(Keystone) API v2"),
cfg.StrOpt('v3_endpoint_type',
default='adminURL',
choices=['public', 'admin', 'internal',
@@ -170,15 +159,35 @@
cfg.IntOpt('user_lockout_failure_attempts',
default=2,
help="The number of unsuccessful login attempts the user is "
- "allowed before having the account locked."),
+ "allowed before having the account locked. This only "
+ "takes effect when identity-feature-enabled."
+ "security_compliance is set to 'True'. For more details, "
+                    "refer to the keystone config option keystone.conf:"
+ "security_compliance.lockout_failure_attempts. "
+ "This feature is disabled by default in keystone."),
cfg.IntOpt('user_lockout_duration',
default=5,
help="The number of seconds a user account will remain "
- "locked."),
+ "locked. This only takes "
+ "effect when identity-feature-enabled.security_compliance "
+ "is set to 'True'. For more details, refer to "
+ "keystone config options "
+ "keystone.conf:security_compliance.lockout_duration. "
+ "Setting this option will have no effect unless you also "
+ "set identity.user_lockout_failure_attempts."),
cfg.IntOpt('user_unique_last_password_count',
default=2,
help="The number of passwords for a user that must be unique "
- "before an old password can be reused."),
+ "before an old password can be reused. This only takes "
+ "effect when identity-feature-enabled.security_compliance "
+ "is set to 'True'. "
+ "This config option corresponds to keystone.conf: "
+ "security_compliance.unique_last_password_count, whose "
+ "default value is 0 meaning disabling this feature. "
+                    "NOTE: This config option value must be the same as "
+                    "keystone.conf: security_compliance.unique_last_password_"
+                    "count, otherwise the test might fail"
+ ),
]
service_clients_group = cfg.OptGroup(name='service-clients',
@@ -242,7 +251,13 @@
cfg.BoolOpt('application_credentials',
default=False,
help='Does the environment have application credentials '
- 'enabled?')
+ 'enabled?'),
+ cfg.BoolOpt('immutable_user_source',
+ default=False,
+ help='Set to True if the environment has a read-only '
+ 'user source. This will skip all tests that attempt to '
+ 'create, delete, or modify users. This should not be set '
+ 'to True if using dynamic credentials')
]
compute_group = cfg.OptGroup(name='compute',
@@ -256,6 +271,17 @@
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
+ cfg.StrOpt('certified_image_ref',
+ help="Valid image reference to be used in image certificate "
+ "validation tests when enabled. This image must also "
+ "have the required img_signature_* properties set. "
+ "Additional details available within the following Nova "
+ "documentation: https://docs.openstack.org/nova/latest/"
+ "user/certificate-validation.html"),
+ cfg.ListOpt('certified_image_trusted_certs',
+ help="A list of trusted certificates to be used when the "
+ "image certificate validation compute feature is "
+ "enabled."),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
@@ -337,8 +363,53 @@
"If both values are not specified, Tempest avoids tests "
"which require a microversion. Valid values are string "
"with format 'X.Y' or string 'latest'"),
+ cfg.StrOpt('compute_volume_common_az',
+ default=None,
+ help='AZ to be used for Cinder and Nova. Set this parameter '
+ 'when the cloud has nova.conf: cinder.cross_az_attach '
+                    'set to false, which means volumes attached to an '
+                    'instance must be in the same availability zone in Cinder '
+                    'as the instance availability zone in Nova. Set the '
+                    'common availability zone in this option; it will be '
+                    'used to boot an instance as well as to create a volume. '
+ 'NOTE: If that AZ is not in Cinder (or '
+ 'allow_availability_zone_fallback=False in cinder.conf), '
+ 'the volume create request will fail and the instance '
+ 'will fail the build request.'),
]
+placement_group = cfg.OptGroup(name='placement',
+ title='Placement Service Options')
+
+PlacementGroup = [
+ cfg.StrOpt('endpoint_type',
+ default='public',
+ choices=['public', 'admin', 'internal'],
+ help="The endpoint type to use for the placement service."),
+ cfg.StrOpt('catalog_type',
+ default='placement',
+ help="Catalog type of the Placement service."),
+ cfg.StrOpt('region',
+ default='',
+ help="The placement region name to use. If empty, the value "
+ "of [identity]/region is used instead. If no such region "
+ "is found in the service catalog, the first region found "
+ "is used."),
+ cfg.StrOpt('min_microversion',
+ default=None,
+ help="Lower version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+ cfg.StrOpt('max_microversion',
+ default=None,
+ help="Upper version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+]
+
+
compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
title="Enabled Compute Service Features")
@@ -404,7 +475,14 @@
default=False,
help="Does the test environment support block migration with "
"Cinder iSCSI volumes. Note: libvirt >= 1.2.17 is required "
- "to support this if using the libvirt compute driver."),
+ "to support this if using the libvirt compute driver.",
+ deprecated_for_removal=True,
+ deprecated_reason='This option duplicates the more generic '
+                                  '[compute-feature-enabled]/block_migration'
+ '_for_live_migration now that '
+ 'MIN_LIBVIRT_VERSION is >= 1.2.17 on all '
+ 'branches from stable/rocky and will be '
+ 'removed in a future release.'),
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
@@ -412,15 +490,28 @@
cfg.StrOpt('vnc_server_header',
default='WebSockify',
help='Expected VNC server name (WebSockify, nginx, etc) '
- 'in response header.'),
+ 'in response header.',
+ deprecated_for_removal=True,
+ deprecated_reason='This option will be ignored because the '
+ 'usage of different response header fields '
+ 'to accomplish the same goal (in accordance '
+ 'with RFC7231 S6.2.2) makes it obsolete.'),
cfg.BoolOpt('spice_console',
default=False,
help='Enable Spice console. This configuration value should '
- 'be same as nova.conf: spice.enabled'),
+ 'be same as nova.conf: spice.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
- 'be same as nova.conf: rdp.enabled'),
+ 'be same as nova.conf: rdp.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('serial_console',
default=False,
help='Enable serial console. This configuration value '
@@ -430,6 +521,10 @@
default=True,
help='Does the test environment support instance rescue '
'mode?'),
+ cfg.BoolOpt('stable_rescue',
+ default=False,
+ help='Does the test environment support stable device '
+ 'instance rescue mode?'),
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the '
@@ -463,20 +558,29 @@
cfg.BoolOpt('config_drive',
default=True,
help='Enable special configuration drive with metadata.'),
- cfg.ListOpt('scheduler_available_filters',
- default=['all'],
- help="A list of enabled filters that nova will accept as hints"
- " to the scheduler when creating a server. A special "
- "entry 'all' indicates all filters that are included "
- "with nova are enabled. Empty list indicates all filters "
- "are disabled. The full list of available filters is in "
- "nova.conf: filter_scheduler.enabled_filters. If the "
+ cfg.ListOpt('scheduler_enabled_filters',
+ default=["AvailabilityZoneFilter", "ComputeFilter",
+ "ComputeCapabilitiesFilter", "ImagePropertiesFilter",
+ "ServerGroupAntiAffinityFilter",
+ "ServerGroupAffinityFilter"],
+ help="A list of enabled filters that Nova will accept as "
+ "hints to the scheduler when creating a server. If the "
"default value is overridden in nova.conf by the test "
"environment (which means that a different set of "
"filters is enabled than what is included in Nova by "
- "default) then, this option must be configured to "
+ "default), then this option must be configured to "
"contain the same filters that Nova uses in the test "
- "environment."),
+ "environment. A special entry 'all' indicates all "
+ "filters that are included with Nova are enabled. If "
+ "using 'all', be sure to enable all filters in "
+ "nova.conf, as tests can fail in unpredictable ways if "
+ "Nova's and Tempest's enabled filters don't match. "
+ "Empty list indicates all filters are disabled. The "
+ "full list of enabled filters is in nova.conf: "
+ "filter_scheduler.enabled_filters.",
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'scheduler_available_filters',
+ group='compute-feature-enabled')]),
cfg.BoolOpt('swap_volume',
default=False,
help='Does the test environment support in-place swapping of '
@@ -559,6 +663,7 @@
network_group = cfg.OptGroup(name='network',
title='Network Service Options')
+ProfileType = types.Dict(types.List(types.String(), bounds=True))
NetworkGroup = [
cfg.StrOpt('catalog_type',
default='network',
@@ -581,7 +686,7 @@
default=28,
help="The mask bits for project ipv4 subnets"),
cfg.StrOpt('project_network_v6_cidr',
- default="2003::/48",
+ default="2001:db8::/48",
help="The cidr block to allocate project ipv6 subnets from"),
cfg.IntOpt('project_network_v6_mask_bits',
default=64,
@@ -598,6 +703,11 @@
cfg.StrOpt('floating_network_name',
help="Default floating network name. Used to allocate floating "
"IPs when neutron is enabled."),
+ cfg.StrOpt('subnet_id',
+ default="",
+               help="Subnet id of the subnet which is used for allocation "
+                    "of floating IPs. Specify when two or more subnets are "
+                    "present in the network."),
cfg.StrOpt('public_router_id',
default="",
help="Id of the public router that provides external "
@@ -615,17 +725,21 @@
cfg.ListOpt('dns_servers',
default=["8.8.8.8", "8.8.4.4"],
help="List of dns servers which should be used"
- " for subnet creation"),
+ " for subnet creation",
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is no longer "
+ "used anywhere, so it can be removed."),
cfg.StrOpt('port_vnic_type',
choices=[None, 'normal', 'direct', 'macvtap'],
help="vnic_type to use when launching instances"
" with pre-configured ports."
" Supported ports are:"
" ['normal','direct','macvtap']"),
- cfg.DictOpt('port_profile',
- default={},
- help="port profile to use when launching instances"
- " with pre-configured ports."),
+ cfg.Opt('port_profile',
+ type=ProfileType,
+ default={},
+ help="port profile to use when launching instances"
+ " with pre-configured ports."),
cfg.ListOpt('default_network',
default=["1.0.0.0/16", "2.0.0.0/16"],
help="List of ip pools"
@@ -664,7 +778,13 @@
help="Does the test environment support port security?"),
cfg.BoolOpt('floating_ips',
default=True,
- help='Does the test environment support floating_ips')
+ help='Does the test environment support floating_ips'),
+ cfg.StrOpt('qos_placement_physnet', default=None,
+ help='Name of the physnet for placement based minimum '
+ 'bandwidth allocation.'),
+    cfg.StrOpt('provider_net_base_segmentation_id', default='3000',
+ help='Base segmentation ID to create provider networks. '
+ 'This value will be increased in case of conflict.')
]
validation_group = cfg.OptGroup(name='validation',
@@ -713,9 +833,10 @@
help="User name used to authenticate to an instance."),
cfg.StrOpt('image_ssh_password',
default="password",
- help="Password used to authenticate to an instance."),
+ help="Password used to authenticate to an instance.",
+ secret=True),
cfg.StrOpt('ssh_shell_prologue',
- default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
+ default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
help="Shell fragments to use before executing a command "
"when sshing to a guest."),
cfg.IntOpt('ping_size',
@@ -978,7 +1099,12 @@
choices=["udhcpc", "dhclient", ""],
help='DHCP client used by images to renew DCHP lease. '
'If left empty, update operation will be skipped. '
- 'Supported clients: "udhcpc", "dhclient"')
+ 'Supported clients: "udhcpc", "dhclient"'),
+ cfg.StrOpt('protocol',
+ default='icmp',
+ choices=('icmp', 'tcp', 'udp'),
+ help='The protocol used in security groups tests to check '
+ 'connectivity.'),
]
@@ -1029,6 +1155,18 @@
""")
]
+
+profiler_group = cfg.OptGroup(name="profiler",
+ title="OpenStack Profiler")
+
+ProfilerGroup = [
+ cfg.StrOpt('key',
+ help="The secret key to enable OpenStack Profiler. The value "
+ "should match the one configured in OpenStack services "
+ "under `[profiler]/hmac_keys` property. The default empty "
+ "value keeps profiling disabled"),
+]
+
DefaultGroup = [
cfg.BoolOpt('pause_teardown',
default=False,
@@ -1060,6 +1198,8 @@
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
+ (placement_group, PlacementGroup),
+ (profiler_group, ProfilerGroup),
(None, DefaultGroup)
]
@@ -1177,7 +1317,7 @@
logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
if ((not hasattr(_CONF, 'log_config_append') or
- _CONF.log_config_append is None) and
+ _CONF.log_config_append is None) and
os.path.isfile(logging_cfg_path)):
# if logging conf is in place we need to set log_config_append
_CONF.log_config_append = logging_cfg_path
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
old mode 100644
new mode 100755
index a430d5d..c05e7a6
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -42,6 +42,11 @@
message = "Volume %(volume_id)s failed to restore and is in ERROR status"
+class VolumeExtendErrorException(exceptions.TempestException):
+ message = ("Volume %(volume_id)s failed to extend and "
+ "is in error_extending status")
+
+
class StackBuildErrorException(exceptions.TempestException):
message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
"due to '%(stack_status_reason)s'")
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 2c40cb1..6a97a00 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -15,6 +15,7 @@
import os
import re
+from hacking import core
import pycodestyle
@@ -25,7 +26,6 @@
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
-VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
@@ -39,6 +39,7 @@
_HAVE_NEGATIVE_DECORATOR = False
+@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
@@ -53,6 +54,7 @@
" in tempest/api/* or tempest/scenario/* tests"))
+@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
@@ -67,6 +69,7 @@
"T104: Scenario tests require a service decorator")
+@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
@@ -80,20 +83,7 @@
"T105: (setUp|tearDown)Class can not be used in tests")
-def no_vi_headers(physical_line, line_number, lines):
- """Check for vi editor configuration in source files.
-
- By default vi modelines can only appear in the first or
- last 5 lines of a source file.
-
- T106
- """
- # NOTE(gilliard): line_number is 1-indexed
- if line_number <= 5 or line_number > len(lines) - 5:
- if VI_HEADER_RE.match(physical_line):
- return 0, "T106: Don't put vi configuration in source files"
-
-
+@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
@@ -117,6 +107,7 @@
"T107: service tag should not be in path")
+@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
@@ -127,6 +118,7 @@
return 0, msg
+@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
@@ -137,6 +129,7 @@
yield (0, msg)
+@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
@@ -170,7 +163,8 @@
return True
-def get_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
@@ -197,7 +191,8 @@
yield (0, msg)
-def delete_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
@@ -223,6 +218,7 @@
yield (0, msg)
+@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
@@ -244,6 +240,7 @@
yield (0, msg)
+@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
@@ -260,6 +257,7 @@
yield (0, msg)
+@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
@@ -277,7 +275,8 @@
yield(0, msg)
-def dont_put_admin_tests_on_nonadmin_path(logical_line, physical_line,
+@core.flake8ext
+def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
@@ -287,9 +286,6 @@
if 'tempest/api/' not in filename:
return
- if pycodestyle.noqa(physical_line):
- return
-
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
@@ -298,6 +294,7 @@
yield(0, msg)
+@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
@@ -309,6 +306,7 @@
yield(0, msg)
+@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
@@ -330,22 +328,3 @@
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
-
-
-def factory(register):
- register(import_no_clients_in_api_and_scenario_tests)
- register(scenario_tests_need_service_tags)
- register(no_setup_teardown_class_for_tests)
- register(no_vi_headers)
- register(service_tags_not_in_module_path)
- register(no_hyphen_at_end_of_rand_name)
- register(no_mutable_default_args)
- register(no_testtools_skip_decorator)
- register(get_resources_on_service_clients)
- register(delete_resources_on_service_clients)
- register(dont_import_local_tempest_into_lib)
- register(dont_use_config_in_tempest_lib)
- register(use_rand_uuid_instead_of_uuid4)
- register(dont_put_admin_tests_on_nonadmin_path)
- register(unsupported_exception_attribute_PY3)
- register(negative_test_attribute_always_applied_to_negative_tests)
diff --git a/tempest/lib/api_schema/response/compute/v2_1/volumes.py b/tempest/lib/api_schema/response/compute/v2_1/volumes.py
index c35dae9..d367f2a 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/volumes.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/volumes.py
@@ -50,7 +50,8 @@
# If it would come as empty array "[]" then,
# those elements can be defined as 'required'.
}
- }
+ },
+ 'os-vol-host-attr:host': {'type': 'string'},
},
'additionalProperties': False,
'required': ['id', 'status', 'displayName', 'availabilityZone',
diff --git a/tempest/lib/api_schema/response/compute/v2_16/servers.py b/tempest/lib/api_schema/response/compute/v2_16/servers.py
index 72b84f5..fc81ff7 100644
--- a/tempest/lib/api_schema/response/compute/v2_16/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_16/servers.py
@@ -168,3 +168,6 @@
servers.rebuild_server_with_admin_pass)
show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers.get_remote_consoles)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index e3e8ad1..b6c3c14 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -58,3 +58,6 @@
list_servers = copy.deepcopy(serversv216.list_servers)
show_server_diagnostics = copy.deepcopy(serversv216.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(serversv216.get_remote_consoles)
+attach_volume = copy.deepcopy(serversv216.attach_volume)
+show_volume_attachment = copy.deepcopy(serversv216.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_26/servers.py b/tempest/lib/api_schema/response/compute/v2_26/servers.py
index 8e62dc3..5a0f987 100644
--- a/tempest/lib/api_schema/response/compute/v2_26/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_26/servers.py
@@ -101,3 +101,6 @@
list_servers = copy.deepcopy(servers219.list_servers)
show_server_diagnostics = copy.deepcopy(servers219.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers219.get_remote_consoles)
+attach_volume = copy.deepcopy(servers219.attach_volume)
+show_volume_attachment = copy.deepcopy(servers219.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers219.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_3/servers.py b/tempest/lib/api_schema/response/compute/v2_3/servers.py
index 18fb352..1674c1b 100644
--- a/tempest/lib/api_schema/response/compute/v2_3/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_3/servers.py
@@ -173,3 +173,6 @@
rebuild_server_with_admin_pass = copy.deepcopy(
servers.rebuild_server_with_admin_pass)
show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index 0fbacd3..d580f2c 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -66,3 +66,6 @@
update_tag = copy.deepcopy(servers226.update_tag)
delete_tag = copy.deepcopy(servers226.delete_tag)
list_servers = copy.deepcopy(servers226.list_servers)
+attach_volume = copy.deepcopy(servers226.attach_volume)
+show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_48/servers.py b/tempest/lib/api_schema/response/compute/v2_48/servers.py
index 84b5a2a..e2e45bc 100644
--- a/tempest/lib/api_schema/response/compute/v2_48/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_48/servers.py
@@ -129,3 +129,6 @@
rebuild_server = copy.deepcopy(servers247.rebuild_server)
rebuild_server_with_admin_pass = copy.deepcopy(
servers247.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers247.attach_volume)
+show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_54/servers.py b/tempest/lib/api_schema/response/compute/v2_54/servers.py
index 099e1b8..2c2bff0 100644
--- a/tempest/lib/api_schema/response/compute/v2_54/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_54/servers.py
@@ -55,3 +55,6 @@
check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
update_tag = copy.deepcopy(servers248.update_tag)
delete_tag = copy.deepcopy(servers248.delete_tag)
+attach_volume = copy.deepcopy(servers248.attach_volume)
+show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
index 0099a2b..aa57d25 100644
--- a/tempest/lib/api_schema/response/compute/v2_57/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -59,3 +59,6 @@
check_tag_existence = copy.deepcopy(servers254.check_tag_existence)
update_tag = copy.deepcopy(servers254.update_tag)
delete_tag = copy.deepcopy(servers254.delete_tag)
+attach_volume = copy.deepcopy(servers254.attach_volume)
+show_volume_attachment = copy.deepcopy(servers254.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers254.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_59/__init__.py b/tempest/lib/api_schema/response/compute/v2_59/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_59/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_59/migrations.py b/tempest/lib/api_schema/response/compute/v2_59/migrations.py
new file mode 100644
index 0000000..a37c0f1
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_59/migrations.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_23 import migrations
+
+###########################################################################
+#
+# 2.59:
+#
+# The uuid value is now returned in the response body in addition to the
+# migration id for the following API responses:
+#
+# - GET /os-migrations
+# - GET /servers/{server_id}/migrations/{migration_id}
+# - GET /servers/{server_id}/migrations
+#
+###########################################################################
+
+uuid = {'type': 'string', 'format': 'uuid'}
+
+list_migrations = copy.deepcopy(migrations.list_migrations)
+list_migrations['response_body']['properties']['migrations']['items'][
+ 'properties'].update({'uuid': uuid})
+list_migrations['response_body']['properties']['migrations']['items'][
+ 'required'].append('uuid')
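
The module above follows the same copy-and-extend pattern as the other microversion schema files; a minimal, generic illustration (field names invented) of why copy.deepcopy is used before mutating:

import copy

base = {
    'response_body': {
        'type': 'object',
        'properties': {'id': {'type': 'integer'}},
        'required': ['id'],
    }
}

extended = copy.deepcopy(base)
extended['response_body']['properties']['uuid'] = {
    'type': 'string', 'format': 'uuid'}
extended['response_body']['required'].append('uuid')

# The base schema stays untouched, so older microversions keep validating
# responses that do not contain the new field.
assert 'uuid' not in base['response_body']['properties']
assert 'uuid' not in base['response_body']['required']
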
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index d5774de..922bf79 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -28,6 +28,9 @@
rebuild_server_with_admin_pass = copy.deepcopy(
servers.rebuild_server_with_admin_pass)
show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
# NOTE: The consolidated remote console API got introduced with v2.6
# with bp/consolidate-console-api. See Nova commit 578bafeda
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
index 3c3d41c..01910aa 100644
--- a/tempest/lib/api_schema/response/compute/v2_63/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -73,3 +73,6 @@
check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
update_tag = copy.deepcopy(servers257.update_tag)
delete_tag = copy.deepcopy(servers257.delete_tag)
+attach_volume = copy.deepcopy(servers257.attach_volume)
+show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_70/__init__.py b/tempest/lib/api_schema/response/compute/v2_70/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_70/servers.py b/tempest/lib/api_schema/response/compute/v2_70/servers.py
new file mode 100644
index 0000000..5ca4cc8
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/servers.py
@@ -0,0 +1,80 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import servers as servers2_1
+from tempest.lib.api_schema.response.compute.v2_63 import servers as servers263
+
+
+###########################################################################
+#
+# 2.70:
+#
+# Exposes virtual device tags for volume attachments and virtual interfaces
+# (ports). A tag parameter is added to the response body for the following
+# APIs:
+#
+# Volumes
+#
+# - GET /servers/{server_id}/os-volume_attachments (list)
+# - GET /servers/{server_id}/os-volume_attachments/{volume_id} (show)
+# - POST /servers/{server_id}/os-volume_attachments (attach)
+#
+# Ports
+#
+# - GET /servers/{server_id}/os-interface (list)
+# - GET /servers/{server_id}/os-interface/{port_id} (show)
+# - POST /servers/{server_id}/os-interface (attach)
+#
+###########################################################################
+
+attach_volume = copy.deepcopy(servers2_1.attach_volume)
+attach_volume['response_body']['properties']['volumeAttachment'][
+ 'properties'].update({'tag': {'type': ['string', 'null']}})
+attach_volume['response_body']['properties']['volumeAttachment'][
+ 'required'].append('tag')
+
+show_volume_attachment = copy.deepcopy(servers2_1.show_volume_attachment)
+show_volume_attachment['response_body']['properties']['volumeAttachment'][
+ 'properties'].update({'tag': {'type': ['string', 'null']}})
+show_volume_attachment['response_body']['properties'][
+ 'volumeAttachment']['required'].append('tag')
+
+list_volume_attachments = copy.deepcopy(servers2_1.list_volume_attachments)
+list_volume_attachments['response_body']['properties']['volumeAttachments'][
+ 'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+list_volume_attachments['response_body']['properties'][
+ 'volumeAttachments']['items']['required'].append('tag')
+
+# TODO(mriedem): Handle the os-interface changes when there is a test that
+# needs them from this microversion onward.
+
+# NOTE(lajoskatona): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.63 ***
+list_servers_detail = copy.deepcopy(servers263.list_servers_detail)
+rebuild_server = copy.deepcopy(servers263.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers263.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers263.update_server)
+get_server = copy.deepcopy(servers263.get_server)
+list_servers = copy.deepcopy(servers263.list_servers)
+show_server_diagnostics = copy.deepcopy(servers263.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers263.get_remote_consoles)
+list_tags = copy.deepcopy(servers263.list_tags)
+update_all_tags = copy.deepcopy(servers263.update_all_tags)
+delete_all_tags = copy.deepcopy(servers263.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers263.check_tag_existence)
+update_tag = copy.deepcopy(servers263.update_tag)
+delete_tag = copy.deepcopy(servers263.delete_tag)
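
A hedged, simplified sketch of the schema selection that the NOTE above refers to; the real logic lives in the service clients and tempest.lib.common.api_version_utils, and this stand-in only handles '2.x' microversions with placeholder module names:

def pick_schema_module(schema_versions_info, microversion):
    """Return the schema mapped to the negotiated microversion."""
    minor = int(microversion.split('.')[1])
    for entry in schema_versions_info:
        low = int(entry['min'].split('.')[1]) if entry['min'] else 0
        high = int(entry['max'].split('.')[1]) if entry['max'] else 10 ** 9
        if low <= minor <= high:
            return entry['schema']


schema_versions_info = [
    {'min': None, 'max': '2.69', 'schema': 'servers_v2_63'},
    {'min': '2.70', 'max': '2.70', 'schema': 'servers_v2_70'},
    {'min': '2.71', 'max': None, 'schema': 'servers_v2_71'},
]
assert pick_schema_module(schema_versions_info, '2.70') == 'servers_v2_70'
assert pick_schema_module(schema_versions_info, '2.73') == 'servers_v2_71'
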
diff --git a/tempest/lib/api_schema/response/compute/v2_71/__init__.py b/tempest/lib/api_schema/response/compute/v2_71/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_71/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
new file mode 100644
index 0000000..5cf0f8a
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_70 import servers as servers270
+
+
+###########################################################################
+#
+# 2.71:
+#
+# The server_groups parameter will be in the response body of the following
+# APIs to list the server groups to which the server belongs:
+#
+# - GET /servers/{server_id} (show)
+# - PUT /servers/{server_id} (update)
+# - POST /servers/{server_id}/action (rebuild)
+#
+###########################################################################
+
+# The "server_groups" parameter will always be present and contain at most one
+# UUID entry.
+server_groups = {
+ 'type': 'array',
+ 'minItems': 0,
+ 'maxItems': 1,
+ 'items': {
+ 'type': 'string',
+ 'format': 'uuid'
+ }
+}
+
+rebuild_server = copy.deepcopy(servers270.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'server_groups': server_groups})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('server_groups')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers270.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'server_groups': server_groups})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('server_groups')
+
+update_server = copy.deepcopy(servers270.update_server)
+update_server['response_body']['properties']['server'][
+ 'properties'].update({'server_groups': server_groups})
+update_server['response_body']['properties']['server'][
+ 'required'].append('server_groups')
+
+get_server = copy.deepcopy(servers270.get_server)
+get_server['response_body']['properties']['server'][
+ 'properties'].update({'server_groups': server_groups})
+get_server['response_body']['properties']['server'][
+ 'required'].append('server_groups')
+
+# NOTE(lajoskatona): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.70 ***
+list_servers_detail = copy.deepcopy(servers270.list_servers_detail)
+list_servers = copy.deepcopy(servers270.list_servers)
+show_server_diagnostics = copy.deepcopy(servers270.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers270.get_remote_consoles)
+list_tags = copy.deepcopy(servers270.list_tags)
+update_all_tags = copy.deepcopy(servers270.update_all_tags)
+delete_all_tags = copy.deepcopy(servers270.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers270.check_tag_existence)
+update_tag = copy.deepcopy(servers270.update_tag)
+delete_tag = copy.deepcopy(servers270.delete_tag)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/__init__.py b/tempest/lib/api_schema/response/compute/v2_73/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_73/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
new file mode 100644
index 0000000..6e491e9
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_71 import servers as servers271
+
+
+###########################################################################
+#
+# 2.73:
+#
+# The locked_reason parameter is now returned in the response body of the
+# following calls:
+#
+# - POST /servers/{server_id}/action (where the action is rebuild)
+# - PUT /servers/{server_id} (update)
+# - GET /servers/{server_id} (show)
+# - GET /servers/detail (list)
+#
+###########################################################################
+
+# The "locked_reason" parameter will either be a string or None.
+locked_reason = {'type': ['string', 'null']}
+
+rebuild_server = copy.deepcopy(servers271.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers271.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+update_server = copy.deepcopy(servers271.update_server)
+update_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+update_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+get_server = copy.deepcopy(servers271.get_server)
+get_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+get_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+list_servers_detail = copy.deepcopy(servers271.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({'locked_reason': locked_reason})
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('locked_reason')
+
+# NOTE(lajoskatona): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.71 ***
+list_servers = copy.deepcopy(servers271.list_servers)
+show_server_diagnostics = copy.deepcopy(servers271.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers271.get_remote_consoles)
+list_tags = copy.deepcopy(servers271.list_tags)
+update_all_tags = copy.deepcopy(servers271.update_all_tags)
+delete_all_tags = copy.deepcopy(servers271.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
+update_tag = copy.deepcopy(servers271.update_tag)
+delete_tag = copy.deepcopy(servers271.delete_tag)
diff --git a/tempest/lib/api_schema/response/compute/v2_8/servers.py b/tempest/lib/api_schema/response/compute/v2_8/servers.py
index df7847f..3dbab3f 100644
--- a/tempest/lib/api_schema/response/compute/v2_8/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_8/servers.py
@@ -35,3 +35,6 @@
rebuild_server_with_admin_pass = copy.deepcopy(
servers.rebuild_server_with_admin_pass)
show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index 55f8e75..ee0313d 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -54,3 +54,6 @@
list_servers = copy.deepcopy(servers.list_servers)
show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers.get_remote_consoles)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/volume/capabilities.py b/tempest/lib/api_schema/response/volume/capabilities.py
new file mode 100644
index 0000000..ec60fc3
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/capabilities.py
@@ -0,0 +1,55 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+property_info = {
+ 'type': 'object',
+ 'properties': {
+ 'type': {'type': 'string'},
+ 'description': {'type': 'string'},
+ 'title': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['type', 'description', 'title']
+}
+
+show_backend_capabilities = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'pool_name': {'type': ['string', 'null']},
+ 'description': {'type': ['string', 'null']},
+ 'volume_backend_name': {'type': 'string'},
+ 'namespace': {'type': 'string',
+ 'pattern': '^OS::Storage::Capabilities::.+$'},
+ 'visibility': {'type': ['string', 'null']},
+ 'driver_version': {'type': 'string'},
+ 'vendor_name': {'type': 'string'},
+ 'properties': {
+ 'type': 'object',
+                'patternProperties': {
+ '^.+$': property_info
+ },
+ },
+ 'storage_protocol': {'type': 'string'},
+ 'replication_targets': {'type': 'array'},
+ 'display_name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['pool_name', 'volume_backend_name', 'namespace',
+ 'visibility', 'driver_version', 'vendor_name',
+ 'properties', 'storage_protocol', 'replication_targets',
+ 'display_name', 'description']
+ }
+}
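
A standalone check (using jsonschema directly, outside Tempest's rest_client plumbing) that a made-up capabilities body satisfies the schema above; every value here is invented:

import jsonschema

from tempest.lib.api_schema.response.volume import capabilities

sample = {
    'pool_name': None,
    'description': None,
    'volume_backend_name': 'lvmdriver-1',
    'namespace': 'OS::Storage::Capabilities::fake',
    'visibility': None,
    'driver_version': '3.0.0',
    'vendor_name': 'Open Source',
    'properties': {},
    'storage_protocol': 'iSCSI',
    'replication_targets': [],
    'display_name': None,
}
jsonschema.validate(
    sample, capabilities.show_backend_capabilities['response_body'])
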
diff --git a/tempest/lib/api_schema/response/volume/encryption_types.py b/tempest/lib/api_schema/response/volume/encryption_types.py
new file mode 100755
index 0000000..7e7ca4a
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/encryption_types.py
@@ -0,0 +1,95 @@
+# Copyright 2019 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+show_encryption_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'volume_type_id': {'type': 'string', 'format': 'uuid'},
+ 'encryption_id': {'type': 'string', 'format': 'uuid'},
+ 'key_size': {'type': ['integer', 'null']},
+ 'provider': {'type': 'string'},
+ 'control_location': {'enum': ['front-end', 'back-end']},
+ 'cipher': {'type': ['string', 'null']},
+ 'deleted': {'type': 'boolean'},
+ 'created_at': parameter_types.date_time,
+ 'updated_at': parameter_types.date_time_or_null,
+ 'deleted_at': parameter_types.date_time_or_null
+ },
+        # The result of show_encryption_type may be empty,
+        # so there are no required fields.
+ 'additionalProperties': False,
+ }
+}
+
+show_encryption_specs_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+}
+
+create_encryption_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'encryption': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type_id': {'type': 'string', 'format': 'uuid'},
+ 'encryption_id': {'type': 'string', 'format': 'uuid'},
+ 'key_size': {'type': ['integer', 'null']},
+ 'provider': {'type': 'string'},
+ 'control_location': {'enum': ['front-end', 'back-end']},
+ 'cipher': {'type': ['string', 'null']},
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type_id', 'encryption_id']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['encryption']
+ }
+}
+
+delete_encryption_type = {'status_code': [202]}
+
+update_encryption_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'encryption': {
+ 'type': 'object',
+ 'properties': {
+ 'key_size': {'type': ['integer', 'null']},
+ 'provider': {'type': 'string'},
+ 'control_location': {'enum': ['front-end', 'back-end']},
+ 'cipher': {'type': ['string', 'null']},
+ },
+ # all fields are optional
+ 'additionalProperties': False,
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['encryption']
+ }
+}
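
Because show_encryption_type declares no required properties and allows the 'null' type, both an empty body and a missing one pass validation; a minimal sketch::

    import jsonschema

    from tempest.lib.api_schema.response.volume import encryption_types

    body_schema = encryption_types.show_encryption_type['response_body']
    # Both an empty object and a null body validate, matching the comment
    # about the result possibly being empty.
    jsonschema.validate({}, body_schema)
    jsonschema.validate(None, body_schema)
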
diff --git a/tempest/lib/api_schema/response/volume/extensions.py b/tempest/lib/api_schema/response/volume/extensions.py
new file mode 100644
index 0000000..8dcb07d
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/extensions.py
@@ -0,0 +1,43 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+list_extensions = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extensions': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'updated': parameter_types.date_time,
+ 'description': {'type': 'string'},
+ 'links': {'type': 'array'},
+ 'namespace': {'type': 'string'},
+ 'alias': {'type': 'string'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['updated', 'links', 'alias', 'name',
+ 'description']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['extensions'],
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/group_types.py b/tempest/lib/api_schema/response/volume/group_types.py
new file mode 100644
index 0000000..bcfa32e
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/group_types.py
@@ -0,0 +1,122 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+group_specs = {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+}
+
+common_show_group_type = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'group_specs': group_specs,
+ 'description': {'type': ['string', 'null']},
+ 'name': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['id', 'is_public', 'description', 'name']
+}
+
+create_group_type = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_type': common_show_group_type
+ },
+ 'additionalProperties': False,
+ 'required': ['group_type']
+ }
+}
+
+delete_group_type = {'status_code': [202]}
+
+list_group_types = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_types': {
+ 'type': 'array',
+ 'items': common_show_group_type
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['group_types'],
+ }
+}
+
+show_group_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_type': common_show_group_type
+ },
+ 'additionalProperties': False,
+ 'required': ['group_type']
+ }
+}
+
+update_group_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_type': common_show_group_type
+ },
+ 'additionalProperties': False,
+ 'required': ['group_type']
+ }
+}
+
+create_or_update_group_type_specs = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_specs': group_specs,
+ },
+ 'additionalProperties': False,
+ 'required': ['group_specs']
+ }
+}
+
+list_group_type_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'group_specs': group_specs,
+ },
+ 'additionalProperties': False,
+ 'required': ['group_specs']
+ }
+}
+
+show_group_type_specs_item = {
+ 'status_code': [200],
+ 'response_body': group_specs
+}
+
+update_group_type_specs_item = {
+ 'status_code': [200],
+ 'response_body': group_specs
+}
+
+delete_group_type_specs_item = {'status_code': [202]}
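
These schemas are consumed by the volume service clients through RestClient.validate_response, which checks both the status code and the body; a simplified, illustrative client method (the real in-tree GroupTypesClient may differ in details)::

    from oslo_serialization import jsonutils as json

    from tempest.lib.api_schema.response.volume import group_types as schema
    from tempest.lib.common import rest_client


    class GroupTypesClient(rest_client.RestClient):
        """Illustrative only; not the in-tree implementation."""

        def create_group_type(self, **kwargs):
            post_body = json.dumps({'group_type': kwargs})
            resp, body = self.post('group_types', post_body)
            body = json.loads(body)
            # validate_response checks resp.status against
            # schema['status_code'] and the decoded body against
            # schema['response_body'].
            self.validate_response(schema.create_group_type, resp, body)
            return rest_client.ResponseBody(resp, body)
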
diff --git a/tempest/lib/api_schema/response/volume/hosts.py b/tempest/lib/api_schema/response/volume/hosts.py
new file mode 100644
index 0000000..ce67e9f
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/hosts.py
@@ -0,0 +1,81 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+show_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'resource': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_count': {'type': 'string'},
+ 'total_volume_gb': {'type': 'string'},
+ 'total_snapshot_gb': {'type': 'string'},
+ 'project': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'snapshot_count': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_count', 'total_volume_gb',
+ 'total_snapshot_gb', 'project',
+ 'host', 'snapshot_count'],
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['resource']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['host']
+ }
+}
+
+list_hosts = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hosts': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'service-status': {
+ 'enum': ['available', 'unavailable']},
+ 'service': {'type': 'string'},
+ 'zone': {'type': 'string'},
+ 'service-state': {
+ 'enum': ['enabled', 'disabled']},
+ 'host_name': {'type': 'string'},
+ 'last-update': parameter_types.date_time_or_null
+ },
+ 'additionalProperties': False,
+ 'required': ['service-status', 'service', 'zone',
+ 'service-state', 'host_name', 'last-update']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['hosts']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/limits.py b/tempest/lib/api_schema/response/volume/limits.py
new file mode 100644
index 0000000..99af180
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/limits.py
@@ -0,0 +1,55 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+show_limits = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'limits': {
+ 'type': 'object',
+ 'properties': {
+ 'rate': {'type': 'array'},
+ 'absolute': {
+ 'type': 'object',
+ 'properties': {
+ 'totalSnapshotsUsed': {'type': 'integer'},
+ 'maxTotalBackups': {'type': 'integer'},
+ 'maxTotalVolumeGigabytes': {'type': 'integer'},
+ 'maxTotalSnapshots': {'type': 'integer'},
+ 'maxTotalBackupGigabytes': {'type': 'integer'},
+ 'totalBackupGigabytesUsed': {'type': 'integer'},
+ 'maxTotalVolumes': {'type': 'integer'},
+ 'totalVolumesUsed': {'type': 'integer'},
+ 'totalBackupsUsed': {'type': 'integer'},
+ 'totalGigabytesUsed': {'type': 'integer'},
+ },
+ 'additionalProperties': False,
+ 'required': ['totalSnapshotsUsed', 'maxTotalBackups',
+ 'maxTotalVolumeGigabytes',
+ 'maxTotalSnapshots',
+ 'maxTotalBackupGigabytes',
+ 'totalBackupGigabytesUsed',
+ 'maxTotalVolumes', 'totalVolumesUsed',
+ 'totalBackupsUsed', 'totalGigabytesUsed']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['rate', 'absolute'],
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['limits']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/manage_snapshot.py b/tempest/lib/api_schema/response/volume/manage_snapshot.py
new file mode 100644
index 0000000..bbb9ee2
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/manage_snapshot.py
@@ -0,0 +1,49 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+manage_snapshot = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshot': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'size': {'type': 'integer'},
+ 'metadata': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ },
+ 'name': {'type': ['string', 'null']},
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'created_at': parameter_types.date_time,
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'updated_at': parameter_types.date_time_or_null
+ },
+ 'additionalProperties': False,
+ 'required': ['status', 'size', 'volume_id',
+ 'created_at', 'description', 'id', 'updated_at']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshot']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/messages.py b/tempest/lib/api_schema/response/volume/messages.py
new file mode 100644
index 0000000..381f542
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/messages.py
@@ -0,0 +1,64 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+delete_message = {
+ 'status_code': [204],
+}
+
+common_show_message = {
+ 'type': 'object',
+ 'properties': {
+ 'request_id': {'type': 'string'},
+ 'message_level': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'event_id': {'type': 'string'},
+ 'created_at': parameter_types.date_time,
+ 'guaranteed_until': parameter_types.date_time,
+ 'resource_uuid': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'resource_type': {'type': 'string'},
+ 'user_message': {'type': 'string'}},
+ 'additionalProperties': False,
+ 'required': ['request_id', 'message_level', 'event_id', 'created_at',
+ 'id', 'user_message'],
+}
+
+show_message = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'message': common_show_message
+ },
+ 'additionalProperties': False,
+ 'required': ['message']
+ }
+}
+
+list_messages = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'messages': {
+ 'type': 'array',
+ 'items': common_show_message
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['messages']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/qos.py b/tempest/lib/api_schema/response/volume/qos.py
new file mode 100644
index 0000000..d1b3910
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/qos.py
@@ -0,0 +1,123 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+show_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'consumer': {'type': 'string'},
+ 'specs': {'type': ['object', 'null']},
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'id', 'specs']
+ },
+ 'links': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string',
+ 'format': 'uri'},
+ 'rel': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['href', 'rel']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs', 'links']
+ }
+}
+
+delete_qos = {'status_code': [202]}
+
+list_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'specs': {
+ 'type': 'object',
+ 'patternProperties': {'^.+$': {'type': 'string'}}
+ },
+ 'consumer': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['specs', 'id', 'name']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs']
+ }
+}
+
+set_qos_key = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'object',
+ 'patternProperties': {'^.+$': {'type': 'string'}}
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs']
+ }
+}
+
+unset_qos_key = {'status_code': [202]}
+associate_qos = {'status_code': [202]}
+
+show_association_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_associations': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'association_type': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['association_type', 'id', 'name']
+ }
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_associations']
+ }
+}
+
+disassociate_qos = {'status_code': [202]}
+disassociate_all_qos = {'status_code': [202]}
diff --git a/tempest/lib/api_schema/response/volume/quota_classes.py b/tempest/lib/api_schema/response/volume/quota_classes.py
new file mode 100644
index 0000000..1a575d2
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/quota_classes.py
@@ -0,0 +1,68 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+show_quota_classes = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_class_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes'],
+ }
+ },
+ 'required': ['quota_class_set']
+ }
+}
+
+update_quota_classes = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_class_set': {
+ 'type': 'object',
+ 'properties': {
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes'],
+ }
+ },
+ 'required': ['quota_class_set']
+ }
+}
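
The schema-valued additionalProperties ({'type': 'integer'}) is what admits the dynamic volumes_{volume_type} style keys mentioned in the comments; a small sketch with made-up values (note jsonschema only enforces 'format' when a FormatChecker is supplied)::

    import jsonschema

    from tempest.lib.api_schema.response.volume import quota_classes

    body = {
        'quota_class_set': {
            'id': 'default',
            'volumes': 10, 'snapshots': 10, 'backups': 10, 'groups': 10,
            'per_volume_gigabytes': -1, 'gigabytes': 1000,
            'backup_gigabytes': 1000,
            # Allowed because additionalProperties is itself a schema.
            'volumes_lvmdriver-1': 5,
        }
    }
    jsonschema.validate(
        body, quota_classes.show_quota_classes['response_body'])
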
diff --git a/tempest/lib/api_schema/response/volume/quotas.py b/tempest/lib/api_schema/response/volume/quotas.py
new file mode 100644
index 0000000..4be584c
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/quotas.py
@@ -0,0 +1,92 @@
+# Copyright 2019 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+delete_quota_set = {
+ 'status_code': [200],
+}
+
+quota_usage_info = {
+ 'type': 'object',
+ 'properties': {
+ 'reserved': {'type': 'integer'},
+ 'allocated': {'type': 'integer'},
+ 'limit': {'type': 'integer'},
+ 'in_use': {'type': 'integer'}
+ },
+ 'additionalProperties': False,
+ # 'allocated' attribute is available only when nested quota is enabled.
+ 'required': ['reserved', 'limit', 'in_use'],
+}
+
+show_quota_set = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes', 'groups'],
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
+
+update_quota_set = copy.deepcopy(show_quota_set)
+update_quota_set['response_body']['properties']['quota_set'][
+ 'required'].remove('id')
+
+show_quota_set_usage = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': quota_usage_info,
+ 'snapshots': quota_usage_info,
+ 'backups': quota_usage_info,
+ 'groups': quota_usage_info,
+ 'per_volume_gigabytes': quota_usage_info,
+ 'gigabytes': quota_usage_info,
+ 'backup_gigabytes': quota_usage_info,
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": quota_usage_info,
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes', 'groups'],
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
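
If volume microversions ever get per-version schema files like the compute v2_8/v2_9 modules above, the expected convention is to deepcopy and tweak these base schemas; a hypothetical sketch (the added field name is invented)::

    import copy

    from tempest.lib.api_schema.response.volume import quotas

    # Start from the base schema and adjust only what the newer
    # microversion adds.
    show_quota_set = copy.deepcopy(quotas.show_quota_set)
    quota_set = show_quota_set['response_body']['properties']['quota_set']
    quota_set['properties']['backup_gigabytes_used'] = {'type': 'integer'}
    quota_set['required'].append('backup_gigabytes_used')
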
diff --git a/tempest/lib/api_schema/response/volume/services.py b/tempest/lib/api_schema/response/volume/services.py
new file mode 100644
index 0000000..70de878
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/services.py
@@ -0,0 +1,92 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+list_services = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'services': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'binary': {'type': 'string'},
+ 'disabled_reason': {'type': ['string', 'null']},
+ 'host': {'type': 'string'},
+ 'state': {'enum': ['up', 'down']},
+ 'status': {'enum': ['enabled', 'disabled']},
+ 'frozen': {'type': 'boolean'},
+ 'updated_at': parameter_types.date_time,
+ 'zone': {'type': 'string'},
+ # TODO(zhufl): cluster is added in 3.7, we should move
+ # it to the 3.7 schema file when microversion is
+ # supported in volume interfaces
+ 'cluster': {'type': 'string'},
+ 'replication_status': {'type': 'string'},
+ 'active_backend_id': {'type': ['string', 'null']},
+ 'backend_state': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['binary', 'disabled_reason', 'host', 'state',
+ 'status', 'updated_at', 'zone']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['services']
+ }
+}
+
+enable_service = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'disabled': {'type': 'boolean'},
+ 'status': {'enum': ['enabled', 'disabled']},
+ 'host': {'type': 'string'},
+ 'service': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ 'disabled_reason': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['disabled', 'status', 'host', 'service',
+ 'binary', 'disabled_reason']
+ }
+}
+
+disable_service = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'disabled': {'type': 'boolean'},
+ 'status': {'enum': ['enabled', 'disabled']},
+ 'host': {'type': 'string'},
+ 'service': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['disabled', 'status', 'host', 'service', 'binary']
+ }
+}
+
+disable_log_reason = copy.deepcopy(enable_service)
+
+freeze_host = {'status_code': [200]}
+thaw_host = {'status_code': [200]}
diff --git a/tempest/lib/api_schema/response/volume/snapshots.py b/tempest/lib/api_schema/response/volume/snapshots.py
new file mode 100644
index 0000000..e9aeb64
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/snapshots.py
@@ -0,0 +1,198 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+metadata = {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+}
+
+common_snapshot_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'description': {'type': ['string', 'null']},
+ 'created_at': parameter_types.date_time,
+ 'name': {'type': 'string'},
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'metadata': metadata,
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'size': {'type': 'integer'},
+ 'updated_at': parameter_types.date_time_or_null,
+ # TODO(zhufl): user_id is added in 3.41, we should move it
+ # to the 3.41 schema file when microversion is supported
+ # in volume interfaces
+ # 'user_id': {'type': 'string', 'format': 'uuid'}
+ },
+ 'additionalProperties': False,
+ 'required': ['status', 'description', 'created_at', 'metadata',
+ 'name', 'volume_id', 'id', 'size', 'updated_at']
+}
+
+common_snapshot_detail_schema = copy.deepcopy(common_snapshot_schema)
+common_snapshot_detail_schema['properties'].update(
+ {'os-extended-snapshot-attributes:progress': {'type': 'string'},
+ 'os-extended-snapshot-attributes:project_id': {
+ 'type': 'string', 'format': 'uuid'}})
+common_snapshot_detail_schema['required'].extend(
+ ['os-extended-snapshot-attributes:progress',
+ 'os-extended-snapshot-attributes:project_id'])
+
+list_snapshots_no_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshots': {
+ 'type': 'array',
+ 'items': common_snapshot_schema
+ },
+ 'snapshots_links': parameter_types.links,
+ # TODO(zhufl): count is added in 3.45, we should move
+ # it to the 3.45 schema file when microversion is
+ # supported in volume interfaces
+ # 'count': {'type': 'integer'}
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshots'],
+ }
+}
+
+list_snapshots_with_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshots': {
+ 'type': 'array',
+ 'items': common_snapshot_detail_schema
+ },
+ 'snapshots_links': parameter_types.links,
+ # TODO(zhufl): count is added in 3.45, we should move
+ # it to the 3.45 schema file when microversion is
+ # supported in volume interfaces
+ # 'count': {'type': 'integer'},
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshots'],
+ }
+}
+
+show_snapshot = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshot': common_snapshot_detail_schema
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshot'],
+ }
+}
+
+create_snapshot = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshot': common_snapshot_schema
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshot'],
+ }
+}
+
+update_snapshot = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshot': common_snapshot_schema
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshot'],
+ }
+}
+
+delete_snapshot = {'status_code': [202]}
+reset_snapshot_status = {'status_code': [202]}
+update_snapshot_status = {'status_code': [202]}
+
+create_snapshot_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': metadata
+ },
+ 'additionalProperties': False,
+ 'required': ['metadata'],
+ }
+}
+
+show_snapshot_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': metadata
+ },
+ 'additionalProperties': False,
+ 'required': ['metadata'],
+ }
+}
+
+update_snapshot_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': metadata
+ },
+ 'additionalProperties': False,
+ 'required': ['metadata'],
+ }
+}
+
+show_snapshot_metadata_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': metadata
+ },
+ 'additionalProperties': False,
+ 'required': ['meta'],
+ }
+}
+
+update_snapshot_metadata_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': metadata
+ },
+ 'additionalProperties': False,
+ 'required': ['meta'],
+ }
+}
+
+delete_snapshot_metadata_item = {'status_code': [200]}
+force_delete_snapshot = {'status_code': [202]}
+unmanage_snapshot = {'status_code': [202]}
diff --git a/tempest/lib/api_schema/response/volume/transfers.py b/tempest/lib/api_schema/response/volume/transfers.py
new file mode 100644
index 0000000..d1d1b68
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/transfers.py
@@ -0,0 +1,129 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+create_volume_transfer = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': {
+ 'type': 'object',
+ 'properties': {
+ 'auth_key': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'created_at': parameter_types.date_time,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['auth_key', 'links', 'created_at',
+ 'volume_id', 'id', 'name']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
+
+common_show_volume_transfer = {
+ 'type': 'object',
+ 'properties': {
+ 'links': parameter_types.links,
+ 'created_at': parameter_types.date_time,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['links', 'created_at', 'volume_id', 'id', 'name']
+}
+
+show_volume_transfer = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': common_show_volume_transfer
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
+
+list_volume_transfers_no_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'links': parameter_types.links,
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_id', 'id', 'links', 'name']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfers'],
+ }
+}
+
+list_volume_transfers_with_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfers': {
+ 'type': 'array',
+ 'items': common_show_volume_transfer
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfers'],
+ }
+}
+
+delete_volume_transfer = {'status_code': [202]}
+
+accept_volume_transfer = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': {
+ 'type': 'object',
+ 'properties': {
+ 'links': parameter_types.links,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['links', 'volume_id', 'id', 'name']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/versions.py b/tempest/lib/api_schema/response/volume/versions.py
old mode 100644
new mode 100755
index 2391a8c..c845f7f
--- a/tempest/lib/api_schema/response/volume/versions.py
+++ b/tempest/lib/api_schema/response/volume/versions.py
@@ -58,3 +58,49 @@
'required': ['versions'],
}
}
+
+volume_api_version_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'versions': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'id': {'type': 'string'},
+ 'links': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string',
+ 'format': 'uri'},
+ 'rel': {'type': 'string'},
+ 'type': {'type': 'string'},
+ },
+ 'required': ['href', 'rel']
+ }
+ },
+ 'min_version': {'type': 'string'},
+ 'version': {'type': 'string'},
+ 'media-types': {
+ 'type': 'array',
+ 'properties': {
+ 'base': {'type': 'string'},
+ 'type': {'type': 'string'}
+ },
+ 'required': ['base', 'type']
+ }
+ },
+ 'required': ['status', 'updated', 'id', 'links',
+ 'min_version', 'version', 'media-types']
+ }
+ }
+ },
+ 'required': ['versions'],
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/volume_types.py b/tempest/lib/api_schema/response/volume/volume_types.py
new file mode 100644
index 0000000..51b3a72
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/volume_types.py
@@ -0,0 +1,176 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+extra_specs_info = {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+}
+
+common_show_volume_type = {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'os-volume-type-access:is_public': {'type': 'boolean'},
+ 'qos_specs_id': {'type': ['string', 'null'], 'format': 'uuid'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'description', 'id',
+ 'os-volume-type-access:is_public']
+}
+
+show_volume_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': common_show_volume_type,
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+delete_volume_type = {'status_code': [202]}
+
+create_volume_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'os-volume-type-access:is_public': {'type': 'boolean'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'id',
+ 'description', 'os-volume-type-access:is_public']
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+list_volume_types = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_types': {
+ 'type': 'array',
+ 'items': common_show_volume_type
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_types']
+ }
+}
+
+list_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info
+ },
+ 'additionalProperties': False,
+ 'required': ['extra_specs']
+ }
+}
+
+show_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': extra_specs_info
+}
+
+create_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info
+ },
+ 'additionalProperties': False,
+ 'required': ['extra_specs']
+ }
+}
+
+delete_volume_types_extra_specs = {'status_code': [202]}
+
+update_volume_types = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'description', 'id']
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+update_volume_type_extra_specs = {
+ 'status_code': [200],
+ 'response_body': extra_specs_info
+}
+
+add_type_access = {'status_code': [202]}
+
+remove_type_access = {'status_code': [202]}
+
+list_type_access = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type_access': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type_id': {'type': 'string', 'format': 'uuid'},
+ 'project_id': {'type': 'string', 'format': 'uuid'},
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type_id', 'project_id']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type_access']
+ }
+}
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 2dd9d00..3fee489 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -324,7 +324,7 @@
pass
if expiry is None:
raise ValueError(
- "time data '{data}' does not match any of the"
+ "time data '{data}' does not match any of the "
"expected formats: {formats}".format(
data=expiry_string, formats=self.EXPIRY_DATE_FORMATS))
return expiry
@@ -684,7 +684,7 @@
def __str__(self):
"""Represent only attributes included in self.ATTRIBUTES"""
- attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+ attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
_repr = dict((k, getattr(self, k)) for k in attrs)
return str(_repr)
@@ -741,7 +741,7 @@
def __str__(self):
"""Represent only attributes included in self.ATTRIBUTES"""
- attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+ attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
_repr = dict((k, getattr(self, k)) for k in attrs)
return str(_repr)
diff --git a/tempest/lib/base.py b/tempest/lib/base.py
index 3be55c0..74ae77c 100644
--- a/tempest/lib/base.py
+++ b/tempest/lib/base.py
@@ -14,11 +14,29 @@
# under the License.
import os
+import sys
import fixtures
+import pkg_resources
import testtools
+def _handle_skip_exception():
+ try:
+ stestr_version = pkg_resources.parse_version(
+ pkg_resources.get_distribution("stestr").version)
+ stestr_min = pkg_resources.parse_version('2.5.0')
+ new_stestr = (stestr_version >= stestr_min)
+ import unittest
+ import unittest2
+ if sys.version_info >= (3, 5) and new_stestr:
+ testtools.TestCase.skipException = unittest.case.SkipTest
+ else:
+ testtools.TestCase.skipException = unittest2.case.SkipTest
+ except Exception:
+ pass
+
+
class BaseTestCase(testtools.testcase.WithAttributes, testtools.TestCase):
setUpClassCalled = False
@@ -33,6 +51,18 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
+        # TODO(gmann): cls.handle_skip_exception is really a workaround for
+        # a testtools bug: https://github.com/testing-cabal/testtools/issues/272
+        # stestr, which Tempest uses internally to run the tests, switches to
+        # a customized test runner (using stdlib unittest) for >=py3.5,
+        # otherwise testtools.run: https://github.com/mtreinish/stestr/pull/265
+        # These two test runners are not compatible in their skip exception
+        # handling (due to unittest2): testtools.run treats unittest.SkipTest
+        # as an error, while stdlib unittest treats unittest2.case.SkipTest
+        # (raised by testtools.TestCase.skipException) as an error.
+        # The workaround below can be removed once testtools fixes issue 272.
+ cls.orig_skip_exception = testtools.TestCase.skipException
+ _handle_skip_exception()
@classmethod
def tearDownClass(cls):
@@ -40,6 +70,7 @@
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
+ testtools.TestCase.skipException = self.orig_skip_exception
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's "
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 82fcd0b..71ecb32 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -110,7 +110,7 @@
for item in files:
if item.endswith('.py'):
module_name = '.'.join((root_package,
- os.path.splitext(item)[0]))
+ os.path.splitext(item)[0]))
if not module_name.startswith(UNIT_TESTS_EXCLUDE):
modules.append(module_name)
return modules
@@ -233,8 +233,8 @@
if self._is_test_case(module, node))
for node in test_cases:
for subnode in filter(self._is_test_method, node.body):
- test_name = '%s.%s' % (node.name, subnode.name)
- tests[module_name]['tests'][test_name] = subnode
+ test_name = '%s.%s' % (node.name, subnode.name)
+ tests[module_name]['tests'][test_name] = subnode
return tests
@staticmethod
diff --git a/tempest/lib/common/api_microversion_fixture.py b/tempest/lib/common/api_microversion_fixture.py
new file mode 100644
index 0000000..3837138
--- /dev/null
+++ b/tempest/lib/common/api_microversion_fixture.py
@@ -0,0 +1,82 @@
+# Copyright 2019 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from tempest.lib.services.compute import base_compute_client
+from tempest.lib.services.placement import base_placement_client
+from tempest.lib.services.volume import base_client as base_volume_client
+
+
+class APIMicroversionFixture(fixtures.Fixture):
+ """API Microversion Fixture to set service microversion.
+
+    This class provides a fixture to set and reset the microversion on the
+    service clients. Each service client holds a global variable that sets
+    the microversion for that service's API requests.
+    For example: base_compute_client.COMPUTE_MICROVERSION
+    Setting such a global directly is always risky because it can leak into
+    other tests' API requests as well. This class provides a way to reset
+    the service microversion once the test has finished its API requests.
+    This class can be used with useFixture. Example::
+
+ def setUp(self):
+ super(BaseV2ComputeTest, self).setUp()
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion=self.compute_request_microversion))
+
+ Or you can set microversion on multiple services together::
+
+ def setUp(self):
+ super(ScenarioTest, self).setUp()
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion=self.compute_request_microversion,
+ volume_microversion=self.volume_request_microversion))
+
+ Current supported services:
+ - Compute
+ - Volume
+ - Placement
+
+    :param str compute_microversion: microversion to be set on compute
+                                     service clients
+    :param str volume_microversion: microversion to be set on volume
+                                    service clients
+    :param str placement_microversion: microversion to be set on placement
+                                       service clients
+ """
+
+ def __init__(self, compute_microversion=None, volume_microversion=None,
+ placement_microversion=None):
+ self.compute_microversion = compute_microversion
+ self.volume_microversion = volume_microversion
+ self.placement_microversion = placement_microversion
+
+ def _setUp(self):
+ super(APIMicroversionFixture, self)._setUp()
+ if self.compute_microversion:
+ base_compute_client.COMPUTE_MICROVERSION = (
+ self.compute_microversion)
+ if self.volume_microversion:
+ base_volume_client.VOLUME_MICROVERSION = self.volume_microversion
+ if self.placement_microversion:
+ base_placement_client.PLACEMENT_MICROVERSION = (
+ self.placement_microversion)
+
+ self.addCleanup(self._reset_microversion)
+
+ def _reset_microversion(self):
+ base_compute_client.COMPUTE_MICROVERSION = None
+ base_volume_client.VOLUME_MICROVERSION = None
+ base_placement_client.PLACEMENT_MICROVERSION = None
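
Outside of a test's setUp, a fixtures.Fixture can also be used as a context manager, which makes the set/reset behaviour easy to see; a sketch ('2.48' is just an example microversion value)::

    from tempest.lib.common import api_microversion_fixture
    from tempest.lib.services.compute import base_compute_client

    with api_microversion_fixture.APIMicroversionFixture(
            compute_microversion='2.48'):
        # The global is set for the duration of the fixture...
        assert base_compute_client.COMPUTE_MICROVERSION == '2.48'
    # ...and the cleanup registered in _setUp() resets it afterwards.
    assert base_compute_client.COMPUTE_MICROVERSION is None
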
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index bcb076b..80dbc1d 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
import testtools
from tempest.lib.common import api_version_request
@@ -31,6 +32,10 @@
# (min_microversion, max_microversion) on each test class if necessary.
min_microversion = None
max_microversion = LATEST_MICROVERSION
+ volume_min_microversion = None
+ volume_max_microversion = LATEST_MICROVERSION
+ placement_min_microversion = None
+ placement_max_microversion = LATEST_MICROVERSION
def check_skip_with_microversion(test_min_version, test_max_version,
@@ -54,7 +59,7 @@
config_min_version = api_version_request.APIVersionRequest(cfg_min_version)
config_max_version = api_version_request.APIVersionRequest(cfg_max_version)
if ((min_version > max_version) or
- (config_min_version > config_max_version)):
+ (config_min_version > config_max_version)):
msg = ("Test Class versions [%s - %s]. "
"Configuration versions [%s - %s]."
% (min_version.get_string(),
@@ -108,10 +113,12 @@
:param api_microversion_header_name: Microversion header name
Example- "X-OpenStack-Nova-API-Version"
- :param api_microversion: Microversion number like "2.10"
+ :param api_microversion: Microversion number like "2.10", type str.
:param response_header: Response header where microversion is
expected to be present.
"""
+ if not isinstance(api_microversion, six.string_types):
+ raise TypeError('api_microversion must be a string')
api_microversion_header_name = api_microversion_header_name.lower()
if (api_microversion_header_name not in response_header or
api_microversion != response_header[api_microversion_header_name]):
diff --git a/tempest/lib/common/fixed_network.py b/tempest/lib/common/fixed_network.py
index 875a79d..926c3a4 100644
--- a/tempest/lib/common/fixed_network.py
+++ b/tempest/lib/common/fixed_network.py
@@ -24,7 +24,7 @@
"""Get a full network dict from just a network name
:param str name: the name of the network to use
- :param NetworksClient compute_networks_client: The network client
+ :param network.NetworksClient compute_networks_client: The network client
object to use for making the network lists api request
:return: The full dictionary for the network in question
:rtype: dict
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index 738c37f..8c1a802 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -19,7 +19,8 @@
class ClosingProxyHttp(urllib3.ProxyManager):
def __init__(self, proxy_url, disable_ssl_certificate_validation=False,
- ca_certs=None, timeout=None):
+ ca_certs=None, timeout=None, follow_redirects=True):
+ self.follow_redirects = follow_redirects
kwargs = {}
if disable_ssl_certificate_validation:
@@ -50,9 +51,14 @@
new_headers = dict(original_headers, connection='close')
new_kwargs = dict(kwargs, headers=new_headers)
- # Follow up to 5 redirections. Don't raise an exception if
- # it's exceeded but return the HTTP 3XX response instead.
- retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ if self.follow_redirects:
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ else:
+ # Do not follow redirections. Don't raise an exception if
+ # a redirect is found, but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(redirect=False)
r = super(ClosingProxyHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
return Response(r), r.data
@@ -60,7 +66,8 @@
class ClosingHttp(urllib3.poolmanager.PoolManager):
def __init__(self, disable_ssl_certificate_validation=False,
- ca_certs=None, timeout=None):
+ ca_certs=None, timeout=None, follow_redirects=True):
+ self.follow_redirects = follow_redirects
kwargs = {}
if disable_ssl_certificate_validation:
@@ -93,9 +100,14 @@
new_headers = dict(original_headers, connection='close')
new_kwargs = dict(kwargs, headers=new_headers)
- # Follow up to 5 redirections. Don't raise an exception if
- # it's exceeded but return the HTTP 3XX response instead.
- retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ if self.follow_redirects:
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ else:
+ # Do not follow redirections. Don't raise an exception if
+ # a redirect is found, but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(redirect=False)
r = super(ClosingHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
return Response(r), r.data
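
With follow_redirects=False a 3XX response is handed back to the caller instead of being resolved; a rough sketch against ClosingHttp (assuming the wrapper's url/method keyword names and the Response.status attribute that rest_client relies on)::

    from tempest.lib.common import http

    client = http.ClosingHttp(follow_redirects=False)
    resp, _ = client.request(url='http://example.com/old-path', method='GET')
    # A redirect now surfaces as the 3XX response itself.
    if resp.status in (301, 302, 303, 307, 308):
        print('redirected with %d' % resp.status)
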
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 9a35b76..bbf5e89 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -12,9 +12,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import base64
-
import jsonschema
+from oslo_serialization import base64
from oslo_utils import timeutils
import six
@@ -46,9 +45,7 @@
try:
if isinstance(instance, six.text_type):
instance = instance.encode('utf-8')
- base64.decodestring(instance)
- except base64.binascii.Error:
- return False
+ base64.decode_as_bytes(instance)
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index fcdeb17..1011504 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -273,7 +273,7 @@
# NOTE(andreaf) Not all fields may be available on all credentials
# so defaulting to None for that case.
if all([getattr(creds, k, None) == hash_attributes.get(k, None) for
- k in init_attributes]):
+ k in init_attributes]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
diff --git a/tempest/lib/common/profiler.py b/tempest/lib/common/profiler.py
new file mode 100644
index 0000000..1544337
--- /dev/null
+++ b/tempest/lib/common/profiler.py
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import hashlib
+import hmac
+import json
+
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+
+_profiler = {}
+
+
+def enable(profiler_key, trace_id=None):
+ """Enable global profiler instance
+
+ :param profiler_key: the secret key used to enable profiling in services
+ :param trace_id: unique id of the trace, if empty the id is generated
+ automatically
+ """
+ _profiler['key'] = profiler_key
+ _profiler['uuid'] = trace_id or uuidutils.generate_uuid()
+
+
+def disable():
+ """Disable global profiler instance"""
+ _profiler.clear()
+
+
+def serialize_as_http_headers():
+ """Serialize profiler state as HTTP headers
+
+    This function corresponds to the one from the osprofiler library.
+ :return: dictionary with 2 keys `X-Trace-Info` and `X-Trace-HMAC`.
+ """
+ p = _profiler
+ if not p: # profiler is not enabled
+ return {}
+
+ info = {'base_id': p['uuid'], 'parent_id': p['uuid']}
+ trace_info = base64.urlsafe_b64encode(
+ encodeutils.to_utf8(json.dumps(info)))
+ trace_hmac = _sign(trace_info, p['key'])
+
+ return {
+ 'X-Trace-Info': trace_info,
+ 'X-Trace-HMAC': trace_hmac,
+ }
+
+
+def _sign(trace_info, key):
+ h = hmac.new(encodeutils.to_utf8(key), digestmod=hashlib.sha1)
+ h.update(trace_info)
+ return h.hexdigest()
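
Typical use of the new profiler helpers (the key below is a placeholder secret)::

    from tempest.lib.common import profiler

    profiler.enable('SECRET_KEY')
    headers = profiler.serialize_as_http_headers()
    # headers now carries 'X-Trace-Info' and 'X-Trace-HMAC', which
    # rest_client.get_headers() merges into every outgoing request.
    profiler.disable()
    assert profiler.serialize_as_http_headers() == {}
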
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index e2fd722..431a0a0 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -27,6 +27,7 @@
from tempest.lib.common import http
from tempest.lib.common import jsonschema_validator
+from tempest.lib.common import profiler
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
@@ -70,6 +71,7 @@
:param str http_timeout: Timeout in seconds to wait for the http request to
return
:param str proxy_url: http proxy url to use.
+ :param bool follow_redirects: Set to false to stop following redirects.
"""
# The version of the API this client implements
@@ -82,7 +84,7 @@
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
trace_requests='', name=None, http_timeout=None,
- proxy_url=None):
+ proxy_url=None, follow_redirects=True):
self.auth_provider = auth_provider
self.service = service
self.region = region
@@ -107,11 +109,11 @@
self.http_obj = http.ClosingProxyHttp(
proxy_url,
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+ timeout=http_timeout, follow_redirects=follow_redirects)
else:
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+ timeout=http_timeout, follow_redirects=follow_redirects)
def get_headers(self, accept_type=None, send_type=None):
"""Return the default headers which will be used with outgoing requests
@@ -130,8 +132,10 @@
accept_type = 'json'
if send_type is None:
send_type = 'json'
- return {'Content-Type': 'application/%s' % send_type,
- 'Accept': 'application/%s' % accept_type}
+ headers = {'Content-Type': 'application/%s' % send_type,
+ 'Accept': 'application/%s' % accept_type}
+ headers.update(profiler.serialize_as_http_headers())
+ return headers
def __str__(self):
STRING_LIMIT = 80
@@ -281,7 +285,7 @@
def get(self, url, headers=None, extra_headers=False):
"""Send a HTTP GET request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the get request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value that indicates if the headers
returned by the get_headers() method are to
@@ -296,7 +300,7 @@
def delete(self, url, headers=None, body=None, extra_headers=False):
"""Send a HTTP DELETE request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the delete request to
:param dict headers: The headers to use for the request
:param dict body: the request body
:param bool extra_headers: Boolean value that indicates if the headers
@@ -312,7 +316,7 @@
def patch(self, url, body, headers=None, extra_headers=False):
"""Send a HTTP PATCH request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the patch request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value that indicates if the headers
@@ -328,7 +332,7 @@
def put(self, url, body, headers=None, extra_headers=False, chunked=False):
"""Send a HTTP PUT request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the put request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value that indicates if the headers
@@ -345,7 +349,7 @@
def head(self, url, headers=None, extra_headers=False):
"""Send a HTTP HEAD request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the head request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value that indicates if the headers
returned by the get_headers() method are to
@@ -360,7 +364,7 @@
def copy(self, url, headers=None, extra_headers=False):
"""Send a HTTP COPY request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the copy request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value that indicates if the headers
returned by the get_headers() method are to
@@ -373,7 +377,7 @@
return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
- """Get the versions on a endpoint from the keystone catalog
+ """Get the versions on an endpoint from the keystone catalog
This method will make a GET request on the baseurl from the keystone
catalog to return a list of API versions. It is expected that a GET
@@ -525,7 +529,7 @@
if (resp.status == 205 and
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
- raise exceptions.ResponseWithEntity()
+ raise exceptions.ResponseWithEntity()
# NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
@@ -543,24 +547,17 @@
req_url, req_headers, req_body = self.auth_provider.auth_request(
method, url, headers, body, self.filters)
- # Do the actual request, and time it
- start = time.time()
- self._log_request_start(method, req_url)
resp, resp_body = self.raw_request(
req_url, method, headers=req_headers, body=req_body,
chunked=chunked
)
- end = time.time()
- self._log_request(method, req_url, resp, secs=(end - start),
- req_headers=req_headers, req_body=req_body,
- resp_body=resp_body)
-
# Verify HTTP response codes
self.response_checker(method, resp, resp_body)
return resp, resp_body
- def raw_request(self, url, method, headers=None, body=None, chunked=False):
+ def raw_request(self, url, method, headers=None, body=None, chunked=False,
+ log_req_body=None):
"""Send a raw HTTP request without the keystone catalog or auth
This method sends a HTTP request in the same manner as the request()
@@ -576,14 +573,29 @@
explicitly requires no headers use an empty dict.
:param str body: Body to send with the request
:param bool chunked: sends the body with chunked encoding
+ :param str log_req_body: Whether to log the request body or not.
+ It defaults to None, which means the request
+ body is safe to log; otherwise pass any string
+ you want logged in place of the request body,
+ for example: '<omitted>'
:rtype: tuple
:return: a tuple with the first entry containing the response headers
and the second the response body
"""
if headers is None:
headers = self.get_headers()
- return self.http_obj.request(url, method, headers=headers,
- body=body, chunked=chunked)
+ # Do the actual request, and time it
+ start = time.time()
+ self._log_request_start(method, url)
+ resp, resp_body = self.http_obj.request(
+ url, method, headers=headers,
+ body=body, chunked=chunked)
+ end = time.time()
+ req_body = body if log_req_body is None else log_req_body
+ self._log_request(method, url, resp, secs=(end - start),
+ req_headers=headers, req_body=req_body,
+ resp_body=resp_body)
+ return resp, resp_body
def request(self, method, url, extra_headers=False, headers=None,
body=None, chunked=False):
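Two of the rest_client changes above are easiest to see from the caller's side: follow_redirects=False makes the HTTP layer hand back 3xx responses instead of chasing them, and raw_request(..., log_req_body=...) substitutes a placeholder for the body in the request log. A hedged sketch; the service name, region, URL and payload are made up, and auth_provider is assumed to be an already-configured tempest.lib.auth provider:

    import json

    from tempest.lib.common import rest_client

    def build_client(auth_provider):
        # follow_redirects=False: return the redirect response as-is.
        return rest_client.RestClient(auth_provider, 'identity', 'regionOne',
                                      follow_redirects=False)

    def create_user(client, payload):
        # Mask the real body in the log so credentials are never written out.
        return client.raw_request('https://keystone.example/v3/users', 'POST',
                                  headers=client.get_headers(),
                                  body=json.dumps(payload),
                                  log_req_body='<omitted>')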
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index d4ec6ad..3a05f27 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -75,6 +75,11 @@
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
self.proxy_client = proxy_client
+ if (self.proxy_client and self.proxy_client.host == self.host and
+ self.proxy_client.port == self.port and
+ self.proxy_client.username == self.username):
+ raise exceptions.SSHClientProxyClientLoop(
+ host=self.host, port=self.port, username=self.username)
self._proxy_conn = None
def _get_ssh_connection(self, sleep=1.5, backoff=1):
@@ -114,8 +119,10 @@
ssh.close()
if self._is_timed_out(_start_time):
LOG.exception("Failed to establish authenticated ssh"
- " connection to %s@%s after %d attempts",
- self.username, self.host, attempts)
+ " connection to %s@%s after %d attempts. "
+ "Proxy client: %s",
+ self.username, self.host, attempts,
+ self._get_proxy_client_info())
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
@@ -196,11 +203,13 @@
exit_status = channel.recv_exit_status()
- if 0 != exit_status:
- raise exceptions.SSHExecCommandFailed(
- command=cmd, exit_status=exit_status,
- stderr=err_data, stdout=out_data)
- return out_data
+ ssh.close()
+
+ if 0 != exit_status:
+ raise exceptions.SSHExecCommandFailed(
+ command=cmd, exit_status=exit_status,
+ stderr=err_data, stdout=out_data)
+ return out_data
def test_connection_auth(self):
"""Raises an exception when we can not connect to server via ssh."""
@@ -217,3 +226,13 @@
cmd = 'nc %s %s' % (self.host, self.port)
chan.exec_command(cmd)
return chan
+
+ def _get_proxy_client_info(self):
+ if not self.proxy_client:
+ return 'no proxy client'
+ nested_pclient = self.proxy_client._get_proxy_client_info()
+ return ('%(username)s@%(host)s:%(port)s, nested proxy client: '
+ '%(nested_pclient)s' % {'username': self.proxy_client.username,
+ 'host': self.proxy_client.host,
+ 'port': self.proxy_client.port,
+ 'nested_pclient': nested_pclient})
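The new constructor guard above rejects a proxy_client that points back at the same host, port and username, which would otherwise recurse forever when opening the connection. A sketch of the failure it catches (addresses and credentials are made up):

    from tempest.lib.common import ssh
    from tempest.lib import exceptions

    jump = ssh.Client('10.0.0.5', 'cirros', password='secret')

    try:
        # Proxying a client through an identical client now fails eagerly,
        # at construction time, instead of looping on connect.
        ssh.Client('10.0.0.5', 'cirros', password='secret', proxy_client=jump)
    except exceptions.SSHClientProxyClientLoop as exc:
        print(exc)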
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index c5df590..7f94612 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -50,8 +50,7 @@
(e.g. 'prefixfoo-namebar-154876201')
:rtype: string
"""
- randbits = str(random.randint(1, 0x7fffffff))
- rand_name = randbits
+ rand_name = str(random.randint(1, 0x7fffffff))
if name:
rand_name = name + '-' + rand_name
if prefix:
@@ -65,9 +64,9 @@
:param int length: The length of password that you expect to set
(If it's smaller than 3, it's same as 3.)
:return: a random password. The format is
- '<random upper letter>-<random number>-<random special character>
- -<random ascii letters or digit characters or special symbols>'
- (e.g. 'G2*ac8&lKFFgh%2')
+ ``'<random upper letter>-<random number>-<random special character>
+ -<random ascii letters or digit characters or special symbols>'``
+ (e.g. ``G2*ac8&lKFFgh%2``)
:rtype: string
"""
upper = random.choice(string.ascii_uppercase)
@@ -171,7 +170,7 @@
:rtype: string
"""
return b''.join([six.int2byte(random.randint(0, 255))
- for i in range(size)])
+ for i in range(size)])
# Courtesy of http://stackoverflow.com/a/312464
diff --git a/tempest/lib/common/utils/misc.py b/tempest/lib/common/utils/misc.py
index f13b4c8..a0b0c0a 100644
--- a/tempest/lib/common/utils/misc.py
+++ b/tempest/lib/common/utils/misc.py
@@ -12,11 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
-from tempest.lib.common.utils import test_utils
-
-LOG = logging.getLogger(__name__)
def singleton(cls):
@@ -28,10 +23,3 @@
instances[cls] = cls()
return instances[cls]
return getinstance
-
-
-def find_test_caller(*args, **kwargs):
- LOG.warning("tempest.lib.common.utils.misc.find_test_caller is deprecated "
- "in favor of tempest.lib.common.utils.test_utils."
- "find_test_caller")
- test_utils.find_test_caller(*args, **kwargs)
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index b399aa0..808e0fb 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -136,10 +136,17 @@
This decorator applies the testtools.testcase.attr if it is in the list of
attributes to testtools we want to apply.
+
+ :param condition: Optional condition which, if true, will apply the attr.
+ If a condition is specified and it evaluates false, the attr will not be
+ applied to the test function. If not specified, the attr is always applied.
"""
def decorator(f):
- if 'type' in kwargs and isinstance(kwargs['type'], str):
+ # Check to see if the attr should be conditionally applied.
+ if 'condition' in kwargs and not kwargs.get('condition'):
+ return f
+ if 'type' in kwargs and isinstance(kwargs['type'], six.string_types):
f = testtools.testcase.attr(kwargs['type'])(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
@@ -147,3 +154,45 @@
return f
return decorator
+
+
+def unstable_test(*args, **kwargs):
+ """A decorator useful to run tests hitting known bugs and skip it if fails
+
+ This decorator can be used in cases like:
+
+ * We have skipped tests because of some bug, and the bug is now claimed to
+ be fixed. We want to check the stability of the test, so we use this
+ decorator. The number of cases still skipped with that bug can be counted
+ to mark the test stable again.
+ * A test is failing often, but not always. If there is a known bug related
+ to it, and someone is working on a fix, this decorator can be used instead
+ of "skip_because". That ensures the test is still run, so new debug data
+ can be collected from jobs' logs, but it will not make the life of other
+ developers harder by forcing them to recheck jobs more often.
+
+ ``bug`` must be a bug number for the test to be skipped.
+
+ :param bug: bug number causing the test to skip (launchpad or storyboard)
+ :param bug_type: 'launchpad' or 'storyboard', default 'launchpad'
+ :raises: testtools.TestCase.skipException if the test actually fails
+ and ``bug`` is provided
+ """
+ def decor(f):
+ @functools.wraps(f)
+ def inner(self, *func_args, **func_kwargs):
+ try:
+ return f(self, *func_args, **func_kwargs)
+ except Exception as e:
+ if "bug" in kwargs:
+ bug = kwargs['bug']
+ bug_type = kwargs.get('bug_type', 'launchpad')
+ bug_url = _get_bug_url(bug, bug_type)
+ msg = ("Marked as unstable and skipped because of bug: "
+ "%s, failure was: %s") % (bug_url, e)
+ raise testtools.TestCase.skipException(msg)
+ else:
+ raise e
+ return inner
+ return decor
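Both decorator changes above read most naturally from the test side; a small hedged sketch follows, with the bug number and the condition value as placeholders:

    from tempest.lib import decorators

    class ExampleTest(object):  # stand-in for a tempest test class

        # The 'slow' attr is only attached when the condition is true;
        # a false condition leaves the test without the attr.
        @decorators.attr(type='slow', condition=True)
        def test_maybe_slow(self):
            pass

        # While the referenced bug is open, a failure is reported as a skip
        # pointing at the bug; a pass still runs and counts normally.
        @decorators.unstable_test(bug='1234567', bug_type='launchpad')
        def test_known_flaky(self):
            pass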
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 13af890..84b7ee6 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -251,6 +251,11 @@
"stdout:\n%(stdout)s")
+class SSHClientProxyClientLoop(TempestException):
+ message = ("SSH client proxy client has same host: %(host)s, port: "
+ "%(port)s and username: %(username)s as parent")
+
+
class UnknownServiceClient(TempestException):
message = "Service clients named %(services)s are not known"
@@ -280,3 +285,12 @@
class InvalidParam(TempestException):
message = ("Invalid Parameter passed: %(invalid_param)s")
+
+
+class ConsistencyGroupException(TempestException):
+ message = "Consistency group %(cg_id)s failed and is in ERROR status"
+
+
+class ConsistencyGroupSnapshotException(TempestException):
+ message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
+ "in ERROR status")
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 833cfd6..90debd9 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -18,7 +18,6 @@
import importlib
import inspect
import sys
-import warnings
from debtcollector import removals
from oslo_log import log as logging
@@ -32,9 +31,9 @@
from tempest.lib.services import image
from tempest.lib.services import network
from tempest.lib.services import object_storage
+from tempest.lib.services import placement
from tempest.lib.services import volume
-warnings.simplefilter("once")
LOG = logging.getLogger(__name__)
@@ -46,6 +45,7 @@
"""
return {
'compute': compute,
+ 'placement': placement,
'identity.v2': identity.v2,
'identity.v3': identity.v3,
'image.v1': image.v1,
diff --git a/tempest/lib/services/compute/agents_client.py b/tempest/lib/services/compute/agents_client.py
index 408f75d..12b3900 100644
--- a/tempest/lib/services/compute/agents_client.py
+++ b/tempest/lib/services/compute/agents_client.py
@@ -28,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-agent-builds
+ https://docs.openstack.org/api-ref/compute/#list-agent-builds
"""
url = 'os-agents'
if params:
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-agent-build
+ https://docs.openstack.org/api-ref/compute/#create-agent-build
"""
post_body = json.dumps({'agent': kwargs})
resp, body = self.post('os-agents', post_body)
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-agent-build
+ https://docs.openstack.org/api-ref/compute/#delete-agent-build
"""
resp, body = self.delete("os-agents/%s" % agent_id)
self.validate_response(schema.delete_agent, resp, body)
@@ -67,7 +67,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-agent-build
+ https://docs.openstack.org/api-ref/compute/#update-agent-build
"""
put_body = json.dumps({'para': kwargs})
resp, body = self.put('os-agents/%s' % agent_id, put_body)
diff --git a/tempest/lib/services/compute/aggregates_client.py b/tempest/lib/services/compute/aggregates_client.py
index 57f5e4e..c21cc16 100644
--- a/tempest/lib/services/compute/aggregates_client.py
+++ b/tempest/lib/services/compute/aggregates_client.py
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-aggregate
+ https://docs.openstack.org/api-ref/compute/#create-aggregate
"""
post_body = json.dumps({'aggregate': kwargs})
resp, body = self.post('os-aggregates', post_body)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-aggregate
+ https://docs.openstack.org/api-ref/compute/#update-aggregate
"""
put_body = json.dumps({'aggregate': kwargs})
resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
@@ -100,7 +100,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-host
+ https://docs.openstack.org/api-ref/compute/#add-host
"""
post_body = json.dumps({'add_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
@@ -115,7 +115,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-host
+ https://docs.openstack.org/api-ref/compute/#remove-host
"""
post_body = json.dumps({'remove_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
@@ -130,7 +130,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-update-aggregate-metadata
+ https://docs.openstack.org/api-ref/compute/#create-or-update-aggregate-metadata
"""
post_body = json.dumps({'set_metadata': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
diff --git a/tempest/lib/services/compute/baremetal_nodes_client.py b/tempest/lib/services/compute/baremetal_nodes_client.py
index e44c195..3efdbce 100644
--- a/tempest/lib/services/compute/baremetal_nodes_client.py
+++ b/tempest/lib/services/compute/baremetal_nodes_client.py
@@ -29,7 +29,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-bare-metal-nodes
+ https://docs.openstack.org/api-ref/compute/#list-bare-metal-nodes
"""
url = 'os-baremetal-nodes'
if params:
@@ -43,7 +43,7 @@
"""Show the details of a single baremetal node.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/compute/#show-bare-metal-node-details
+ https://docs.openstack.org/api-ref/compute/#show-bare-metal-node-details
"""
url = 'os-baremetal-nodes/%s' % baremetal_node_id
resp, body = self.get(url)
diff --git a/tempest/lib/services/compute/fixed_ips_client.py b/tempest/lib/services/compute/fixed_ips_client.py
index 968646c..098c856 100644
--- a/tempest/lib/services/compute/fixed_ips_client.py
+++ b/tempest/lib/services/compute/fixed_ips_client.py
@@ -34,7 +34,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#reserve-or-release-a-fixed-ip
+ https://docs.openstack.org/api-ref/compute/#reserve-or-release-a-fixed-ip
"""
url = "os-fixed-ips/%s/action" % fixed_ip
resp, body = self.post(url, json.dumps(kwargs))
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index 2fad0a4..e22b5b2 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -41,8 +41,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-flavors
- https://developer.openstack.org/api-ref/compute/#list-flavors-with-details
+ https://docs.openstack.org/api-ref/compute/#list-flavors
+ https://docs.openstack.org/api-ref/compute/#list-flavors-with-details
"""
url = 'flavors'
schema = self.get_schema(self.schema_versions_info)
@@ -64,7 +64,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-flavor-details
+ https://docs.openstack.org/api-ref/compute/#show-flavor-details
"""
resp, body = self.get("flavors/%s" % flavor_id)
body = json.loads(body)
@@ -78,7 +78,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-flavor
+ https://docs.openstack.org/api-ref/compute/#create-flavor
"""
if 'ephemeral' in kwargs:
kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
@@ -99,7 +99,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-flavor-description
+ https://docs.openstack.org/api-ref/compute/#update-flavor-description
"""
put_body = json.dumps({'flavor': kwargs})
resp, body = self.put("flavors/%s" % flavor_id, put_body)
@@ -115,7 +115,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-flavor
+ https://docs.openstack.org/api-ref/compute/#delete-flavor
"""
resp, body = self.delete("flavors/{0}".format(flavor_id))
self.validate_response(schema.delete_flavor, resp, body)
@@ -141,7 +141,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-extra-specs-for-a-flavor
+ https://docs.openstack.org/api-ref/compute/#create-extra-specs-for-a-flavor
"""
post_body = json.dumps({'extra_specs': kwargs})
resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
@@ -156,7 +156,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-extra-specs-for-a-flavor
+ https://docs.openstack.org/api-ref/compute/#list-extra-specs-for-a-flavor
"""
resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
body = json.loads(body)
@@ -169,10 +169,10 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-an-extra-spec-for-a-flavor
+ https://docs.openstack.org/api-ref/compute/#show-an-extra-spec-for-a-flavor
"""
resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
- key))
+ key))
body = json.loads(body)
self.validate_response(
schema_extra_specs.set_get_flavor_extra_specs_key,
@@ -184,7 +184,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-an-extra-spec-for-a-flavor
+ https://docs.openstack.org/api-ref/compute/#update-an-extra-spec-for-a-flavor
"""
resp, body = self.put('flavors/%s/os-extra_specs/%s' %
(flavor_id, key), json.dumps(kwargs))
@@ -201,7 +201,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-an-extra-spec-for-a-flavor
+ https://docs.openstack.org/api-ref/compute/#delete-an-extra-spec-for-a-flavor
"""
resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
(flavor_id, key))
@@ -214,7 +214,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-flavor-access-information-for-given-flavor
+ https://docs.openstack.org/api-ref/compute/#list-flavor-access-information-for-given-flavor
"""
resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
body = json.loads(body)
@@ -227,7 +227,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-flavor-access-to-tenant-addtenantaccess-action
+ https://docs.openstack.org/api-ref/compute/#add-flavor-access-to-tenant-addtenantaccess-action
"""
post_body = {
'addTenantAccess': {
@@ -246,7 +246,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-flavor-access-from-tenant-removetenantaccess-action
+ https://docs.openstack.org/api-ref/compute/#remove-flavor-access-from-tenant-removetenantaccess-action
"""
post_body = {
'removeTenantAccess': {
diff --git a/tempest/lib/services/compute/floating_ips_client.py b/tempest/lib/services/compute/floating_ips_client.py
index 5364d97..d7a1a9b 100644
--- a/tempest/lib/services/compute/floating_ips_client.py
+++ b/tempest/lib/services/compute/floating_ips_client.py
@@ -29,7 +29,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-floating-ip-addresses
+ https://docs.openstack.org/api-ref/compute/#list-floating-ip-addresses
"""
url = 'os-floating-ips'
if params:
@@ -45,7 +45,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-floating-ip-address-details
+ https://docs.openstack.org/api-ref/compute/#show-floating-ip-address-details
"""
url = "os-floating-ips/%s" % floating_ip_id
resp, body = self.get(url)
@@ -58,7 +58,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-allocate-floating-ip-address
+ https://docs.openstack.org/api-ref/compute/#create-allocate-floating-ip-address
"""
url = 'os-floating-ips'
post_body = json.dumps(kwargs)
@@ -72,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-deallocate-floating-ip-address
+ https://docs.openstack.org/api-ref/compute/#delete-deallocate-floating-ip-address
"""
url = "os-floating-ips/%s" % floating_ip_id
resp, body = self.delete(url)
diff --git a/tempest/lib/services/compute/hosts_client.py b/tempest/lib/services/compute/hosts_client.py
index 1fdd907..743b4ec 100644
--- a/tempest/lib/services/compute/hosts_client.py
+++ b/tempest/lib/services/compute/hosts_client.py
@@ -27,7 +27,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-hosts
+ https://docs.openstack.org/api-ref/compute/#list-hosts
"""
url = 'os-hosts'
@@ -52,7 +52,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-host-status
+ https://docs.openstack.org/api-ref/compute/#update-host-status
"""
request_body = {
diff --git a/tempest/lib/services/compute/images_client.py b/tempest/lib/services/compute/images_client.py
index 0f4eb42..b252ee9 100644
--- a/tempest/lib/services/compute/images_client.py
+++ b/tempest/lib/services/compute/images_client.py
@@ -34,7 +34,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-image-createimage-action
+ https://docs.openstack.org/api-ref/compute/#create-image-createimage-action
"""
post_body = {'createImage': kwargs}
@@ -52,8 +52,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-images
- https://developer.openstack.org/api-ref/compute/#list-images-with-details
+ https://docs.openstack.org/api-ref/compute/#list-images
+ https://docs.openstack.org/api-ref/compute/#list-images-with-details
"""
url = 'images'
_schema = schema.list_images
@@ -94,7 +94,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-image-metadata
+ https://docs.openstack.org/api-ref/compute/#update-image-metadata
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % image_id, post_body)
@@ -107,7 +107,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-image-metadata
+ https://docs.openstack.org/api-ref/compute/#create-image-metadata
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % image_id, post_body)
@@ -127,7 +127,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-update-image-metadata-item
+ https://docs.openstack.org/api-ref/compute/#create-or-update-image-metadata-item
"""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (image_id, key),
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index d7c3107..e1c02fa 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -33,7 +33,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-interface
+ https://docs.openstack.org/api-ref/compute/#create-interface
"""
post_body = {'interfaceAttachment': kwargs}
post_body = json.dumps(post_body)
diff --git a/tempest/lib/services/compute/keypairs_client.py b/tempest/lib/services/compute/keypairs_client.py
index 5215fca..47cf2d0 100644
--- a/tempest/lib/services/compute/keypairs_client.py
+++ b/tempest/lib/services/compute/keypairs_client.py
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-keypairs
+ https://docs.openstack.org/api-ref/compute/#list-keypairs
"""
url = 'os-keypairs'
if params:
@@ -48,7 +48,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-keypair-details
+ https://docs.openstack.org/api-ref/compute/#show-keypair-details
"""
url = "os-keypairs/%s" % keypair_name
if params:
@@ -64,7 +64,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-import-keypair
+ https://docs.openstack.org/api-ref/compute/#create-or-import-keypair
"""
post_body = json.dumps({'keypair': kwargs})
resp, body = self.post("os-keypairs", body=post_body)
@@ -78,7 +78,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-keypair
+ https://docs.openstack.org/api-ref/compute/#delete-keypair
"""
url = "os-keypairs/%s" % keypair_name
if params:
diff --git a/tempest/lib/services/compute/migrations_client.py b/tempest/lib/services/compute/migrations_client.py
index 68c8f3f..812dc96 100644
--- a/tempest/lib/services/compute/migrations_client.py
+++ b/tempest/lib/services/compute/migrations_client.py
@@ -18,6 +18,8 @@
from tempest.lib.api_schema.response.compute.v2_1 import migrations as schema
from tempest.lib.api_schema.response.compute.v2_23 import migrations \
as schemav223
+from tempest.lib.api_schema.response.compute.v2_59 import migrations \
+ as schemav259
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -25,14 +27,15 @@
class MigrationsClient(base_compute_client.BaseComputeClient):
schema_versions_info = [
{'min': None, 'max': '2.22', 'schema': schema},
- {'min': '2.23', 'max': None, 'schema': schemav223}]
+ {'min': '2.23', 'max': '2.58', 'schema': schemav223},
+ {'min': '2.59', 'max': None, 'schema': schemav259}]
def list_migrations(self, **params):
"""List all migrations.
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-migrations
+ https://docs.openstack.org/api-ref/compute/#list-migrations
"""
url = 'os-migrations'
diff --git a/tempest/lib/services/compute/quota_classes_client.py b/tempest/lib/services/compute/quota_classes_client.py
index 64e06f4..9b64099 100644
--- a/tempest/lib/services/compute/quota_classes_client.py
+++ b/tempest/lib/services/compute/quota_classes_client.py
@@ -37,7 +37,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-update-quotas-for-quota-class
+ https://docs.openstack.org/api-ref/compute/#create-or-update-quotas-for-quota-class
"""
post_body = json.dumps({'quota_class_set': kwargs})
diff --git a/tempest/lib/services/compute/quotas_client.py b/tempest/lib/services/compute/quotas_client.py
index 99c8d0f..12e865e 100644
--- a/tempest/lib/services/compute/quotas_client.py
+++ b/tempest/lib/services/compute/quotas_client.py
@@ -35,8 +35,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-a-quota
- https://developer.openstack.org/api-ref/compute/#show-the-detail-of-quota
+ https://docs.openstack.org/api-ref/compute/#show-a-quota
+ https://docs.openstack.org/api-ref/compute/#show-the-detail-of-quota
"""
params = {}
@@ -59,7 +59,7 @@
def show_default_quota_set(self, tenant_id):
"""List the default quota set for a tenant.
- https://developer.openstack.org/api-ref/compute/#list-default-quotas-for-tenant
+ https://docs.openstack.org/api-ref/compute/#list-default-quotas-for-tenant
"""
url = 'os-quota-sets/%s/defaults' % tenant_id
@@ -74,7 +74,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-quotas
+ https://docs.openstack.org/api-ref/compute/#update-quotas
"""
post_body = json.dumps({'quota_set': kwargs})
@@ -94,7 +94,7 @@
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set.
- https://developer.openstack.org/api-ref/compute/#revert-quotas-to-defaults
+ https://docs.openstack.org/api-ref/compute/#revert-quotas-to-defaults
"""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
schema = self.get_schema(self.schema_versions_info)
diff --git a/tempest/lib/services/compute/security_group_default_rules_client.py b/tempest/lib/services/compute/security_group_default_rules_client.py
index 70cab88..bed18e9 100644
--- a/tempest/lib/services/compute/security_group_default_rules_client.py
+++ b/tempest/lib/services/compute/security_group_default_rules_client.py
@@ -28,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-default-security-group-rule
+ https://docs.openstack.org/api-ref/compute/#create-default-security-group-rule
"""
post_body = json.dumps({'security_group_default_rule': kwargs})
url = 'os-security-group-default-rules'
diff --git a/tempest/lib/services/compute/security_group_rules_client.py b/tempest/lib/services/compute/security_group_rules_client.py
index 710bfab..7c2bd66 100644
--- a/tempest/lib/services/compute/security_group_rules_client.py
+++ b/tempest/lib/services/compute/security_group_rules_client.py
@@ -28,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-security-group-rule
+ https://docs.openstack.org/api-ref/compute/#create-security-group-rule
"""
post_body = json.dumps({'security_group_rule': kwargs})
url = 'os-security-group-rules'
diff --git a/tempest/lib/services/compute/security_groups_client.py b/tempest/lib/services/compute/security_groups_client.py
index b525f68..9493144 100644
--- a/tempest/lib/services/compute/security_groups_client.py
+++ b/tempest/lib/services/compute/security_groups_client.py
@@ -30,7 +30,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-security-groups
+ https://docs.openstack.org/api-ref/compute/#list-security-groups
"""
url = 'os-security-groups'
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-security-group-details
+ https://docs.openstack.org/api-ref/compute/#show-security-group-details
"""
url = "os-security-groups/%s" % security_group_id
resp, body = self.get(url)
@@ -60,7 +60,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-security-group
+ https://docs.openstack.org/api-ref/compute/#create-security-group
"""
post_body = json.dumps({'security_group': kwargs})
resp, body = self.post('os-security-groups', post_body)
@@ -73,7 +73,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-security-group
+ https://docs.openstack.org/api-ref/compute/#update-security-group
"""
post_body = json.dumps({'security_group': kwargs})
resp, body = self.put('os-security-groups/%s' % security_group_id,
@@ -87,7 +87,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-security-group
+ https://docs.openstack.org/api-ref/compute/#delete-security-group
"""
resp, body = self.delete(
'os-security-groups/%s' % security_group_id)
diff --git a/tempest/lib/services/compute/server_groups_client.py b/tempest/lib/services/compute/server_groups_client.py
index 0d440d5..89ad2d9 100644
--- a/tempest/lib/services/compute/server_groups_client.py
+++ b/tempest/lib/services/compute/server_groups_client.py
@@ -35,7 +35,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-server-group
+ https://docs.openstack.org/api-ref/compute/#create-server-group
"""
post_body = json.dumps({'server_group': kwargs})
resp, body = self.post('os-server-groups', post_body)
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 9eed4b3..3ceecda 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -33,6 +33,9 @@
from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
+from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
+from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
+from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
@@ -55,7 +58,10 @@
{'min': '2.48', 'max': '2.53', 'schema': schemav248},
{'min': '2.54', 'max': '2.56', 'schema': schemav254},
{'min': '2.57', 'max': '2.62', 'schema': schemav257},
- {'min': '2.63', 'max': None, 'schema': schemav263}]
+ {'min': '2.63', 'max': '2.69', 'schema': schemav263},
+ {'min': '2.70', 'max': '2.70', 'schema': schemav270},
+ {'min': '2.71', 'max': '2.72', 'schema': schemav271},
+ {'min': '2.73', 'max': None, 'schema': schemav273}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
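The schema_versions_info table above maps the negotiated compute microversion onto the response-schema module used for validation. The selection logic it implies looks roughly like the sketch below; this is an illustration, not the actual tempest get_schema() implementation:

    def pick_schema(schema_versions_info, microversion):
        # None means an open bound; compare versions as (major, minor) tuples,
        # e.g. '2.70' -> (2, 70), so that '2.9' < '2.70' sorts correctly.
        def as_tuple(version):
            return tuple(int(part) for part in version.split('.'))

        for entry in schema_versions_info:
            low_ok = (entry['min'] is None or
                      as_tuple(entry['min']) <= as_tuple(microversion))
            high_ok = (entry['max'] is None or
                       as_tuple(microversion) <= as_tuple(entry['max']))
            if low_ok and high_ok:
                return entry['schema']
        raise ValueError('no schema registered for %s' % microversion)

    # With the table above: '2.72' selects schemav271, while '2.73' or
    # anything newer selects schemav273.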
@@ -68,7 +74,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/compute/#create-server
+ https://docs.openstack.org/api-ref/compute/#create-server
:param name: Server name
:param imageRef: Image reference (UUID)
@@ -113,7 +119,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-server
+ https://docs.openstack.org/api-ref/compute/#update-server
Most parameters except the following are passed to the API without
any changes.
@@ -134,7 +140,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-server-details
+ https://docs.openstack.org/api-ref/compute/#show-server-details
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
@@ -147,7 +153,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-server
+ https://docs.openstack.org/api-ref/compute/#delete-server
"""
resp, body = self.delete("servers/%s" % server_id)
self.validate_response(schema.delete_server, resp, body)
@@ -158,8 +164,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-servers
- https://developer.openstack.org/api-ref/compute/#list-servers-detailed
+ https://docs.openstack.org/api-ref/compute/#list-servers
+ https://docs.openstack.org/api-ref/compute/#list-servers-detailed
"""
url = 'servers'
@@ -182,7 +188,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-ips
+ https://docs.openstack.org/api-ref/compute/#list-ips
"""
resp, body = self.get("servers/%s/ips" % server_id)
body = json.loads(body)
@@ -205,6 +211,9 @@
post_body)
if body:
body = json.loads(body)
+ else:
+ if isinstance(body, bytes):
+ body = body.decode('utf-8')
self.validate_response(schema, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -213,7 +222,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-server-back-up-createbackup-action
+ https://docs.openstack.org/api-ref/compute/#create-server-back-up-createbackup-action
"""
return self.action(server_id, "createBackup", **kwargs)
@@ -222,7 +231,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action
+ https://docs.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action
"""
return self.action(server_id, 'changePassword', **kwargs)
@@ -250,7 +259,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#reboot-server-reboot-action
+ https://docs.openstack.org/api-ref/compute/#reboot-server-reboot-action
"""
return self.action(server_id, 'reboot', **kwargs)
@@ -259,7 +268,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
+ https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
Most parameters except the following are passed to the API without
any changes.
@@ -281,7 +290,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#resize-server-resize-action
+ https://docs.openstack.org/api-ref/compute/#resize-server-resize-action
Most parameters except the following are passed to the API without
any changes.
@@ -297,7 +306,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action
+ https://docs.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action
"""
return self.action(server_id, 'confirmResize',
schema.server_actions_confirm_resize,
@@ -308,7 +317,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action
+ https://docs.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action
"""
return self.action(server_id, 'revertResize', **kwargs)
@@ -317,7 +326,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-all-metadata
+ https://docs.openstack.org/api-ref/compute/#list-all-metadata
"""
resp, body = self.get("servers/%s/metadata" % server_id)
body = json.loads(body)
@@ -329,7 +338,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#replace-metadata-items
+ https://docs.openstack.org/api-ref/compute/#replace-metadata-items
"""
if no_metadata_field:
post_body = ""
@@ -346,7 +355,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-update-metadata-items
+ https://docs.openstack.org/api-ref/compute/#create-or-update-metadata-items
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
@@ -361,7 +370,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-metadata-item-details
+ https://docs.openstack.org/api-ref/compute/#show-metadata-item-details
"""
resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
body = json.loads(body)
@@ -374,7 +383,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-update-metadata-item
+ https://docs.openstack.org/api-ref/compute/#create-or-update-metadata-item
"""
post_body = json.dumps({'meta': meta})
resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
@@ -389,7 +398,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-metadata-item
+ https://docs.openstack.org/api-ref/compute/#delete-metadata-item
"""
resp, body = self.delete("servers/%s/metadata/%s" %
(server_id, key))
@@ -402,7 +411,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#stop-server-os-stop-action
+ https://docs.openstack.org/api-ref/compute/#stop-server-os-stop-action
"""
return self.action(server_id, 'os-stop', **kwargs)
@@ -411,7 +420,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#start-server-os-start-action
+ https://docs.openstack.org/api-ref/compute/#start-server-os-start-action
"""
return self.action(server_id, 'os-start', **kwargs)
@@ -420,17 +429,23 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#attach-a-volume-to-an-instance
+ https://docs.openstack.org/api-ref/compute/#attach-a-volume-to-an-instance
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.attach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def update_attached_volume(self, server_id, attachment_id, **kwargs):
- """Swaps a volume attached to an instance for another volume"""
+ """Swaps a volume attached to an instance for another volume
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/compute/#update-a-volume-attachment
+ """
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
@@ -443,7 +458,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#detach-a-volume-from-an-instance
+ https://docs.openstack.org/api-ref/compute/#detach-a-volume-from-an-instance
"""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
@@ -455,11 +470,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-a-detail-of-a-volume-attachment
+ https://docs.openstack.org/api-ref/compute/#show-a-detail-of-a-volume-attachment
"""
resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
server_id, volume_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.show_volume_attachment, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -468,11 +484,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-volume-attachments-for-an-instance
+ https://docs.openstack.org/api-ref/compute/#list-volume-attachments-for-an-instance
"""
resp, body = self.get('servers/%s/os-volume_attachments' % (
server_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_volume_attachments, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -481,7 +498,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-security-group-to-a-server-addsecuritygroup-action
+ https://docs.openstack.org/api-ref/compute/#add-security-group-to-a-server-addsecuritygroup-action
"""
return self.action(server_id, 'addSecurityGroup', **kwargs)
@@ -490,7 +507,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-security-group-from-a-server-removesecuritygroup-action
+ https://docs.openstack.org/api-ref/compute/#remove-security-group-from-a-server-removesecuritygroup-action
"""
return self.action(server_id, 'removeSecurityGroup', **kwargs)
@@ -499,7 +516,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#live-migrate-server-os-migratelive-action
+ https://docs.openstack.org/api-ref/compute/#live-migrate-server-os-migratelive-action
"""
return self.action(server_id, 'os-migrateLive', **kwargs)
@@ -508,7 +525,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#migrate-server-migrate-action
+ https://docs.openstack.org/api-ref/compute/#migrate-server-migrate-action
"""
return self.action(server_id, 'migrate', **kwargs)
@@ -517,7 +534,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#lock-server-lock-action
+ https://docs.openstack.org/api-ref/compute/#lock-server-lock-action
"""
return self.action(server_id, 'lock', **kwargs)
@@ -526,7 +543,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#unlock-server-unlock-action
+ https://docs.openstack.org/api-ref/compute/#unlock-server-unlock-action
"""
return self.action(server_id, 'unlock', **kwargs)
@@ -535,7 +552,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#suspend-server-suspend-action
+ https://docs.openstack.org/api-ref/compute/#suspend-server-suspend-action
"""
return self.action(server_id, 'suspend', **kwargs)
@@ -544,7 +561,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#resume-suspended-server-resume-action
+ https://docs.openstack.org/api-ref/compute/#resume-suspended-server-resume-action
"""
return self.action(server_id, 'resume', **kwargs)
@@ -553,7 +570,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#pause-server-pause-action
+ https://docs.openstack.org/api-ref/compute/#pause-server-pause-action
"""
return self.action(server_id, 'pause', **kwargs)
@@ -562,7 +579,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#unpause-server-unpause-action
+ https://docs.openstack.org/api-ref/compute/#unpause-server-unpause-action
"""
return self.action(server_id, 'unpause', **kwargs)
@@ -571,7 +588,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#reset-server-state-os-resetstate-action
+ https://docs.openstack.org/api-ref/compute/#reset-server-state-os-resetstate-action
"""
return self.action(server_id, 'os-resetState', **kwargs)
@@ -580,7 +597,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#shelve-server-shelve-action
+ https://docs.openstack.org/api-ref/compute/#shelve-server-shelve-action
"""
return self.action(server_id, 'shelve', **kwargs)
@@ -589,7 +606,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
+ https://docs.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
"""
return self.action(server_id, 'unshelve', **kwargs)
@@ -598,7 +615,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action
+ https://docs.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action
"""
return self.action(server_id, 'shelveOffload', **kwargs)
@@ -607,7 +624,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action
+ https://docs.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action
"""
return self.action(server_id, 'os-getConsoleOutput',
schema.get_console_output, **kwargs)
@@ -617,7 +634,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-remote-console
+ https://docs.openstack.org/api-ref/compute/#create-remote-console
"""
param = {
'remote_console': {
@@ -636,7 +653,7 @@
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
- 'os-virtual-interfaces']))
+ 'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -646,7 +663,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#rescue-server-rescue-action
+ https://docs.openstack.org/api-ref/compute/#rescue-server-rescue-action
"""
if self.enable_instance_password:
rescue_schema = schema.rescue_server_with_admin_pass
@@ -659,7 +676,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#unrescue-server-unrescue-action
+ https://docs.openstack.org/api-ref/compute/#unrescue-server-unrescue-action
"""
return self.action(server_id, 'unrescue')
@@ -692,7 +709,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#force-delete-server-forcedelete-action
+ https://docs.openstack.org/api-ref/compute/#force-delete-server-forcedelete-action
"""
return self.action(server_id, 'forceDelete', **kwargs)
@@ -701,7 +718,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#restore-soft-deleted-instance-restore-action
+ https://docs.openstack.org/api-ref/compute/#restore-soft-deleted-instance-restore-action
"""
return self.action(server_id, 'restore', **kwargs)
@@ -710,7 +727,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#reset-networking-on-a-server-resetnetwork-action
+ https://docs.openstack.org/api-ref/compute/#reset-networking-on-a-server-resetnetwork-action
"""
return self.action(server_id, 'resetNetwork', **kwargs)
@@ -719,7 +736,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#inject-network-information-injectnetworkinfo-action
+ https://docs.openstack.org/api-ref/compute/#inject-network-information-injectnetworkinfo-action
"""
return self.action(server_id, 'injectNetworkInfo', **kwargs)
@@ -728,7 +745,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action-deprecated
+ https://docs.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action-deprecated
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
@@ -738,7 +755,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action-deprecated
+ https://docs.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action-deprecated
"""
return self.action(server_id, 'addFixedIp', **kwargs)
@@ -747,7 +764,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action-deprecated
+ https://docs.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action-deprecated
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
@@ -756,7 +773,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-security-groups-by-server
+ https://docs.openstack.org/api-ref/compute/#list-security-groups-by-server
"""
resp, body = self.get("servers/%s/os-security-groups" % server_id)
body = json.loads(body)
@@ -769,7 +786,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-tags
+ https://docs.openstack.org/api-ref/compute/#list-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.get(url)
@@ -783,7 +800,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#replace-tags
+ https://docs.openstack.org/api-ref/compute/#replace-tags
:param tags: List of tags to replace current server tags with.
"""
@@ -800,7 +817,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-all-tags
+ https://docs.openstack.org/api-ref/compute/#delete-all-tags
"""
url = 'servers/%s/tags' % server_id
resp, body = self.delete(url)
@@ -813,7 +830,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#check-tag-existence
+ https://docs.openstack.org/api-ref/compute/#check-tag-existence
:param tag: Check for existence of tag on specified server.
"""
@@ -828,7 +845,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-a-single-tag
+ https://docs.openstack.org/api-ref/compute/#add-a-single-tag
:param tag: Tag to be added to the specified server.
"""
@@ -843,7 +860,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-a-single-tag
+ https://docs.openstack.org/api-ref/compute/#delete-a-single-tag
:param tag: Tag to be removed from the specified server.
"""
@@ -858,7 +875,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
+ https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
"""
if self.enable_instance_password:
evacuate_schema = schema.evacuate_server_with_admin_pass
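The server-action methods in this file all funnel through one helper: the request is a POST to servers/<id>/action whose body carries a single key named after the action, and the response may be checked against a JSON schema (as with get_console_output and rescue above). The following is a minimal, self-contained sketch of that pattern, not Tempest's own code; the schema shape and the validate helper are illustrative assumptions, and only jsonschema.validate is a real library call.

import json
import jsonschema

def action_body(action_name, **kwargs):
    # Nova server actions take a one-key JSON body, e.g. {"pause": null}
    # or {"os-getConsoleOutput": {"length": 10}}.
    return json.dumps({action_name: kwargs or None})

# Illustrative response schema, shaped like the console-output check above.
get_console_output = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'output': {'type': 'string'}},
        'required': ['output'],
    },
}

def validate_response(schema, status, body):
    # Check the status code first, then the body, roughly as the client does.
    assert status in schema['status_code'], 'unexpected status %s' % status
    jsonschema.validate(body, schema['response_body'])

if __name__ == '__main__':
    print(action_body('pause'))
    print(action_body('os-getConsoleOutput', length=10))
    validate_response(get_console_output, 200, {'output': 'example console log'})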
diff --git a/tempest/lib/services/compute/services_client.py b/tempest/lib/services/compute/services_client.py
index d52de3a..4e3383f 100644
--- a/tempest/lib/services/compute/services_client.py
+++ b/tempest/lib/services/compute/services_client.py
@@ -38,7 +38,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-compute-services
+ https://docs.openstack.org/api-ref/compute/#list-compute-services
"""
url = 'os-services'
if params:
@@ -60,7 +60,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-compute-service
+ https://docs.openstack.org/api-ref/compute/#update-compute-service
"""
put_body = json.dumps(kwargs)
resp, body = self.put('os-services/%s' % service_id, put_body)
@@ -76,7 +76,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#enable-scheduling-for-a-compute-service
+ https://docs.openstack.org/api-ref/compute/#enable-scheduling-for-a-compute-service
"""
post_body = json.dumps(kwargs)
resp, body = self.put('os-services/enable', post_body)
@@ -91,7 +91,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service
+ https://docs.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service
"""
post_body = json.dumps(kwargs)
resp, body = self.put('os-services/disable', post_body)
@@ -106,7 +106,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service-and-log-disabled-reason
+ https://docs.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service-and-log-disabled-reason
"""
post_body = json.dumps(kwargs)
resp, body = self.put('os-services/disable-log-reason', post_body)
@@ -121,7 +121,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-forced-down
+ https://docs.openstack.org/api-ref/compute/#update-forced-down
"""
post_body = json.dumps(kwargs)
resp, body = self.put('os-services/force-down', post_body)
diff --git a/tempest/lib/services/compute/snapshots_client.py b/tempest/lib/services/compute/snapshots_client.py
index df8d6fb..225eb8d 100644
--- a/tempest/lib/services/compute/snapshots_client.py
+++ b/tempest/lib/services/compute/snapshots_client.py
@@ -29,7 +29,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-snapshot
+ https://docs.openstack.org/api-ref/compute/#create-snapshot
"""
post_body = {
'volume_id': volume_id
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-snapshots
+ https://docs.openstack.org/api-ref/compute/#list-snapshots
"""
url = 'os-snapshots'
diff --git a/tempest/lib/services/compute/tenant_usages_client.py b/tempest/lib/services/compute/tenant_usages_client.py
index ade60e5..a34730c 100644
--- a/tempest/lib/services/compute/tenant_usages_client.py
+++ b/tempest/lib/services/compute/tenant_usages_client.py
@@ -28,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-tenant-usage-statistics-for-all-tenants
+ https://docs.openstack.org/api-ref/compute/#list-tenant-usage-statistics-for-all-tenants
"""
url = 'os-simple-tenant-usage'
if params:
@@ -44,7 +44,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-usage-statistics-for-tenant
+ https://docs.openstack.org/api-ref/compute/#show-usage-statistics-for-tenant
"""
url = 'os-simple-tenant-usage/%s' % tenant_id
if params:
diff --git a/tempest/lib/services/compute/versions_client.py b/tempest/lib/services/compute/versions_client.py
index 8fbb136..c6e1783 100644
--- a/tempest/lib/services/compute/versions_client.py
+++ b/tempest/lib/services/compute/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import versions as schema
@@ -26,11 +24,7 @@
def list_versions(self):
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
body = json.loads(body)
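The versions_client hunk above drops the hand-rolled time.time() pair and the explicit _log_request() call; presumably the shared raw_request() path now records and logs the elapsed time itself, so per-method bookkeeping would only duplicate it. A hypothetical sketch of that idea follows; none of these names are Tempest's.

import functools
import logging
import time

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger('request')

def logged(func):
    # Time and log every call in one place, so callers such as
    # list_versions() no longer need their own start/end timestamps.
    @functools.wraps(func)
    def wrapper(url, method, **kwargs):
        start = time.time()
        resp, body = func(url, method, **kwargs)
        LOG.info('%s %s took %.3fs', method, url, time.time() - start)
        return resp, body
    return wrapper

@logged
def raw_request(url, method, **kwargs):
    # Stand-in for the real HTTP call.
    return {'status': '200'}, '{"versions": []}'

if __name__ == '__main__':
    resp, body = raw_request('http://compute.example/', 'GET')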
diff --git a/tempest/lib/services/compute/volumes_client.py b/tempest/lib/services/compute/volumes_client.py
index 95cdd53..11282ee 100644
--- a/tempest/lib/services/compute/volumes_client.py
+++ b/tempest/lib/services/compute/volumes_client.py
@@ -29,8 +29,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#list-volumes
- https://developer.openstack.org/api-ref/compute/#list-volumes-with-details
+ https://docs.openstack.org/api-ref/compute/#list-volumes
+ https://docs.openstack.org/api-ref/compute/#list-volumes-with-details
"""
url = 'os-volumes'
@@ -49,7 +49,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#show-volume-details
+ https://docs.openstack.org/api-ref/compute/#show-volume-details
"""
url = "os-volumes/%s" % volume_id
resp, body = self.get(url)
@@ -62,7 +62,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-volume
+ https://docs.openstack.org/api-ref/compute/#create-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('os-volumes', post_body)
@@ -75,7 +75,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#delete-volume
+ https://docs.openstack.org/api-ref/compute/#delete-volume
"""
resp, body = self.delete("os-volumes/%s" % volume_id)
self.validate_response(schema.delete_volume, resp, body)
diff --git a/tempest/lib/services/identity/v2/endpoints_client.py b/tempest/lib/services/identity/v2/endpoints_client.py
index db8d7cc..d587b83 100644
--- a/tempest/lib/services/identity/v2/endpoints_client.py
+++ b/tempest/lib/services/identity/v2/endpoints_client.py
@@ -25,7 +25,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/index.html#create-endpoint-template
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#create-endpoint-template
"""
post_body = json.dumps({'endpoint': kwargs})
diff --git a/tempest/lib/services/identity/v2/identity_client.py b/tempest/lib/services/identity/v2/identity_client.py
index c610d65..d7526f3 100644
--- a/tempest/lib/services/identity/v2/identity_client.py
+++ b/tempest/lib/services/identity/v2/identity_client.py
@@ -59,7 +59,7 @@
For a full list of available parameters, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/#validate-token
+ https://docs.openstack.org/api-ref/identity/v2-admin/#validate-token
"""
url = "tokens/%s" % token_id
if params:
diff --git a/tempest/lib/services/identity/v2/roles_client.py b/tempest/lib/services/identity/v2/roles_client.py
index 9e841dd..a133fc3 100644
--- a/tempest/lib/services/identity/v2/roles_client.py
+++ b/tempest/lib/services/identity/v2/roles_client.py
@@ -24,7 +24,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#create-a-role
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#create-a-role
"""
post_body = json.dumps({'role': kwargs})
resp, body = self.post('OS-KSADM/roles', post_body)
@@ -37,9 +37,9 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#show-a-role
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#show-a-role
OR
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#show-role-information-by-name
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#show-role-information-by-name
"""
resp, body = self.get('OS-KSADM/roles/%s' % role_id_or_name)
self.expected_success(200, resp.status)
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#list-all-roles
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#list-all-roles
"""
url = 'OS-KSADM/roles'
if params:
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#delete-a-role
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#delete-a-role
"""
resp, body = self.delete('OS-KSADM/roles/%s' % role_id)
self.expected_success(204, resp.status)
@@ -77,7 +77,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#grant-roles-to-user-on-tenant
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#grant-roles-to-user-on-tenant
"""
resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), "")
@@ -101,7 +101,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#revoke-role-from-user-on-tenant
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#revoke-role-from-user-on-tenant
"""
resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id))
diff --git a/tempest/lib/services/identity/v2/services_client.py b/tempest/lib/services/identity/v2/services_client.py
index 47398db..fc51cb4 100644
--- a/tempest/lib/services/identity/v2/services_client.py
+++ b/tempest/lib/services/identity/v2/services_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-ext/#create-service-admin-extension
+ https://docs.openstack.org/api-ref/identity/v2-ext/#create-service-admin-extension
"""
post_body = json.dumps({'OS-KSADM:service': kwargs})
resp, body = self.post('/OS-KSADM/services', post_body)
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-ext/#list-services-admin-extension
+ https://docs.openstack.org/api-ref/identity/v2-ext/#list-services-admin-extension
"""
url = '/OS-KSADM/services'
if params:
diff --git a/tempest/lib/services/identity/v2/tenants_client.py b/tempest/lib/services/identity/v2/tenants_client.py
index 026db64..09618ad 100644
--- a/tempest/lib/services/identity/v2/tenants_client.py
+++ b/tempest/lib/services/identity/v2/tenants_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#create-tenant
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#create-tenant
"""
post_body = json.dumps({'tenant': kwargs})
resp, body = self.post('tenants', post_body)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/index.html#delete-tenant
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#delete-tenant
"""
resp, body = self.delete('tenants/%s' % str(tenant_id))
self.expected_success(204, resp.status)
@@ -50,7 +50,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/index.html#show-tenant-details-by-id
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#show-tenant-details-by-id
"""
resp, body = self.get('tenants/%s' % str(tenant_id))
self.expected_success(200, resp.status)
@@ -62,7 +62,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#list-tenants-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#list-tenants-admin-endpoint
"""
url = 'tenants'
if params:
@@ -77,7 +77,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#update-tenant
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#update-tenant
"""
if 'id' not in kwargs:
kwargs['id'] = tenant_id
@@ -92,7 +92,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#list-users-on-a-tenant
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#list-users-on-a-tenant
"""
url = '/tenants/%s/users' % tenant_id
if params:
diff --git a/tempest/lib/services/identity/v2/token_client.py b/tempest/lib/services/identity/v2/token_client.py
index 458c862..9f10f58 100644
--- a/tempest/lib/services/identity/v2/token_client.py
+++ b/tempest/lib/services/identity/v2/token_client.py
@@ -105,9 +105,8 @@
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
- headers=headers, body=body)
- self._log_request(method, url, resp, req_headers=headers,
- req_body='<omitted>', resp_body=resp_body)
+ headers=headers, body=body,
+ log_req_body='<omitted>')
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
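In the token_client hunk above, the manual _log_request() call (which substituted '<omitted>' for the request body) is replaced by passing log_req_body='<omitted>' straight into raw_request(), so the credential-bearing auth body goes through the common logging path but is never written out verbatim. A rough sketch of how such a parameter could be consumed; the raw_request internals here are assumptions, and only the log_req_body keyword comes from the diff.

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger('request')

def raw_request(url, method, headers=None, body=None, log_req_body=None):
    # When log_req_body is given, it replaces the real body in the log line,
    # keeping passwords in token requests out of the logs.
    logged_body = log_req_body if log_req_body is not None else body
    LOG.info('%s %s headers=%s body=%s', method, url, headers, logged_body)
    # A real implementation would issue the HTTP request here.
    return {'status': '200'}, '{"access": {"token": {}}}'

if __name__ == '__main__':
    raw_request('https://keystone.example/v2.0/tokens', 'POST',
                headers={'accept': 'application/json'},
                body='{"auth": {"passwordCredentials": '
                     '{"username": "u", "password": "secret"}}}',
                log_req_body='<omitted>')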
diff --git a/tempest/lib/services/identity/v2/users_client.py b/tempest/lib/services/identity/v2/users_client.py
index 44bb5fd..72f29be 100644
--- a/tempest/lib/services/identity/v2/users_client.py
+++ b/tempest/lib/services/identity/v2/users_client.py
@@ -24,7 +24,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#create-user-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#create-user-admin-endpoint
"""
post_body = json.dumps({'user': kwargs})
resp, body = self.post('users', post_body)
@@ -37,7 +37,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#update-user-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#update-user-admin-endpoint
"""
put_body = json.dumps({'user': kwargs})
resp, body = self.put('users/%s' % user_id, put_body)
@@ -50,7 +50,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/index.html#show-user-details-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#show-user-details-admin-endpoint
"""
resp, body = self.get("users/%s" % user_id)
self.expected_success(200, resp.status)
@@ -62,7 +62,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-admin/index.html#delete-user-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#delete-user-admin-endpoint
"""
resp, body = self.delete("users/%s" % user_id)
self.expected_success(204, resp.status)
@@ -73,7 +73,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v2-admin/index.html#list-users-admin-endpoint
+ https://docs.openstack.org/api-ref/identity/v2-admin/index.html#list-users-admin-endpoint
"""
url = "users"
if params:
@@ -88,7 +88,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v2-ext/index.html#enable-disable-user
+ https://docs.openstack.org/api-ref/identity/v2-ext/index.html#enable-disable-user
"""
# NOTE: The URL (users/<id>/enabled) is different from the api-site
# one (users/<id>/OS-KSADM/enabled), but they are the same API
diff --git a/tempest/lib/services/identity/v3/application_credentials_client.py b/tempest/lib/services/identity/v3/application_credentials_client.py
index 557aa9e..be2e172 100644
--- a/tempest/lib/services/identity/v3/application_credentials_client.py
+++ b/tempest/lib/services/identity/v3/application_credentials_client.py
@@ -15,7 +15,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#application-credentials
+https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
"""
from oslo_serialization import jsonutils as json
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3/index.html#create-application-credential
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-application-credential
"""
post_body = json.dumps({'application_credential': kwargs})
resp, body = self.post('users/%s/application_credentials' % user_id,
@@ -46,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3/index.html#show-application-credential-details
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-application-credential-details
"""
resp, body = self.get('users/%s/application_credentials/%s' %
(user_id, application_credential_id))
@@ -59,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3/index.html#list-application-credentials
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-application-credentials
"""
url = 'users/%s/application_credentials' % user_id
if params:
@@ -75,7 +75,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3/index.html#delete-application-credential
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-application-credential
"""
resp, body = self.delete('users/%s/application_credentials/%s' %
(user_id, application_credential_id))
diff --git a/tempest/lib/services/identity/v3/catalog_client.py b/tempest/lib/services/identity/v3/catalog_client.py
index 232b85a..cb9eed1 100644
--- a/tempest/lib/services/identity/v3/catalog_client.py
+++ b/tempest/lib/services/identity/v3/catalog_client.py
@@ -11,7 +11,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#get-service-catalog
+https://docs.openstack.org/api-ref/identity/v3/index.html#get-service-catalog
"""
from oslo_serialization import jsonutils as json
diff --git a/tempest/lib/services/identity/v3/credentials_client.py b/tempest/lib/services/identity/v3/credentials_client.py
index 6e5fd31..3f4b40e 100644
--- a/tempest/lib/services/identity/v3/credentials_client.py
+++ b/tempest/lib/services/identity/v3/credentials_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-http://developer.openstack.org/api-ref/identity/v3/index.html#credentials
+https://docs.openstack.org/api-ref/identity/v3/index.html#credentials
"""
from oslo_serialization import jsonutils as json
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-credential
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-credential
"""
post_body = json.dumps({'credential': kwargs})
resp, body = self.post('credentials', post_body)
@@ -44,7 +44,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-credential
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-credential
"""
post_body = json.dumps({'credential': kwargs})
resp, body = self.patch('credentials/%s' % credential_id, post_body)
@@ -57,7 +57,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-credential-details
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-credential-details
"""
resp, body = self.get('credentials/%s' % credential_id)
self.expected_success(200, resp.status)
@@ -69,7 +69,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-credentials
+ https://docs.openstack.org/api-ref/identity/v3/#list-credentials
"""
url = 'credentials'
if params:
@@ -84,7 +84,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#delete-credential
+ https://docs.openstack.org/api-ref/identity/v3/#delete-credential
"""
resp, body = self.delete('credentials/%s' % credential_id)
self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/domain_configuration_client.py b/tempest/lib/services/identity/v3/domain_configuration_client.py
index d57f2d4..9410b99 100644
--- a/tempest/lib/services/identity/v3/domain_configuration_client.py
+++ b/tempest/lib/services/identity/v3/domain_configuration_client.py
@@ -25,7 +25,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-default-configuration-settings
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-default-configuration-settings
"""
url = 'domains/config/default'
resp, body = self.get(url)
@@ -38,7 +38,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-default-configuration-for-a-group
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-default-configuration-for-a-group
"""
url = 'domains/config/%s/default' % group
resp, body = self.get(url)
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-default-option-for-a-group
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-default-option-for-a-group
"""
url = 'domains/config/%s/%s/default' % (group, option)
resp, body = self.get(url)
@@ -64,7 +64,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-domain-group-option-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-domain-group-option-configuration
"""
url = 'domains/%s/config/%s/%s' % (domain_id, group, option)
resp, body = self.get(url)
@@ -78,7 +78,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-domain-group-option-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-domain-group-option-configuration
"""
url = 'domains/%s/config/%s/%s' % (domain_id, group, option)
resp, body = self.patch(url, json.dumps({'config': kwargs}))
@@ -91,7 +91,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#delete-domain-group-option-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-domain-group-option-configuration
"""
url = 'domains/%s/config/%s/%s' % (domain_id, group, option)
resp, body = self.delete(url)
@@ -103,7 +103,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-domain-group-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-domain-group-configuration
"""
url = 'domains/%s/config/%s' % (domain_id, group)
resp, body = self.get(url)
@@ -116,7 +116,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-domain-group-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-domain-group-configuration
"""
url = 'domains/%s/config/%s' % (domain_id, group)
resp, body = self.patch(url, json.dumps({'config': kwargs}))
@@ -129,7 +129,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#delete-domain-group-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-domain-group-configuration
"""
url = 'domains/%s/config/%s' % (domain_id, group)
resp, body = self.delete(url)
@@ -141,7 +141,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-domain-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-domain-configuration
"""
url = 'domains/%s/config' % domain_id
resp, body = self.put(url, json.dumps({'config': kwargs}))
@@ -154,7 +154,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-domain-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-domain-configuration
"""
url = 'domains/%s/config' % domain_id
resp, body = self.get(url)
@@ -167,7 +167,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-domain-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-domain-configuration
"""
url = 'domains/%s/config' % domain_id
resp, body = self.patch(url, json.dumps({'config': kwargs}))
@@ -180,7 +180,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#delete-domain-configuration
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-domain-configuration
"""
url = 'domains/%s/config' % domain_id
resp, body = self.delete(url)
diff --git a/tempest/lib/services/identity/v3/domains_client.py b/tempest/lib/services/identity/v3/domains_client.py
index 43cb62c..bd32cfc 100644
--- a/tempest/lib/services/identity/v3/domains_client.py
+++ b/tempest/lib/services/identity/v3/domains_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-domain
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-domain
"""
post_body = json.dumps({'domain': kwargs})
resp, body = self.post('domains', post_body)
@@ -38,7 +38,7 @@
"""Deletes a domain.
For API details, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#delete-domain
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-domain
"""
resp, body = self.delete('domains/%s' % domain_id)
self.expected_success(204, resp.status)
@@ -49,7 +49,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#list-domains
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-domains
"""
url = 'domains'
if params:
@@ -64,7 +64,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-domain
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-domain
"""
post_body = json.dumps({'domain': kwargs})
resp, body = self.patch('domains/%s' % domain_id, post_body)
@@ -76,7 +76,7 @@
"""Get Domain details.
For API details, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#show-domain-details
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-domain-details
"""
resp, body = self.get('domains/%s' % domain_id)
self.expected_success(200, resp.status)
diff --git a/tempest/lib/services/identity/v3/endpoint_filter_client.py b/tempest/lib/services/identity/v3/endpoint_filter_client.py
index a8cd722..ce84869 100644
--- a/tempest/lib/services/identity/v3/endpoint_filter_client.py
+++ b/tempest/lib/services/identity/v3/endpoint_filter_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3-ext/#os-ep-filter-api
+https://docs.openstack.org/api-ref/identity/v3-ext/#os-ep-filter-api
"""
from oslo_serialization import jsonutils as json
diff --git a/tempest/lib/services/identity/v3/endpoint_groups_client.py b/tempest/lib/services/identity/v3/endpoint_groups_client.py
index ce99389..90d353d 100644
--- a/tempest/lib/services/identity/v3/endpoint_groups_client.py
+++ b/tempest/lib/services/identity/v3/endpoint_groups_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/#create-endpoint-group
+ https://docs.openstack.org/api-ref/identity/v3-ext/#create-endpoint-group
"""
post_body = json.dumps({'endpoint_group': kwargs})
resp, body = self.post('OS-EP-FILTER/endpoint_groups', post_body)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/#update-endpoint-group
+ https://docs.openstack.org/api-ref/identity/v3-ext/#update-endpoint-group
"""
post_body = json.dumps({'endpoint_group': kwargs})
resp, body = self.patch(
diff --git a/tempest/lib/services/identity/v3/endpoints_client.py b/tempest/lib/services/identity/v3/endpoints_client.py
index e24dca7..236b34c 100644
--- a/tempest/lib/services/identity/v3/endpoints_client.py
+++ b/tempest/lib/services/identity/v3/endpoints_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
+https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
"""
from oslo_serialization import jsonutils as json
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-endpoints
+ https://docs.openstack.org/api-ref/identity/v3/#list-endpoints
"""
url = 'endpoints'
if params:
@@ -46,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-endpoint
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-endpoint
"""
post_body = json.dumps({'endpoint': kwargs})
resp, body = self.post('endpoints', post_body)
@@ -59,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-endpoint
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-endpoint
"""
post_body = json.dumps({'endpoint': kwargs})
resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index bc6ead0..f823b21 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#groups
+https://docs.openstack.org/api-ref/identity/v3/index.html#groups
"""
from oslo_serialization import jsonutils as json
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-group
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-group
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.post('groups', post_body)
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-groups
+ https://docs.openstack.org/api-ref/identity/v3/#list-groups
"""
url = 'groups'
if params:
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-group
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-group
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.patch('groups/%s' % group_id, post_body)
@@ -92,7 +92,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-users-in-group
+ https://docs.openstack.org/api-ref/identity/v3/#list-users-in-group
"""
url = 'groups/%s/users' % group_id
if params:
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 691c7fd..3949437 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -114,8 +114,7 @@
def check_user_has_flag_on_inherited_to_project(
self, project_id, user_id, role_id):
- """Checks whether a user has a role assignment"""
- """with the inherited_to_projects flag on a project."""
+ """Check if user has an inherited project role on project"""
resp, body = self.head(
"OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
% (project_id, user_id, role_id))
@@ -142,8 +141,7 @@
def check_group_has_flag_on_inherited_to_project(
self, project_id, group_id, role_id):
- """Checks whether a group has a role assignment"""
- """with the inherited_to_projects flag on a project."""
+ """Check if group has an inherited project role on project"""
resp, body = self.head(
"OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
% (project_id, group_id, role_id))
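The two-line docstrings removed above never worked as intended: only the first string literal in a function body becomes __doc__, and a second literal on the next line is a separate expression statement that Python evaluates and discards rather than concatenates. A small standalone demonstration:

def broken():
    """Checks whether a user has a role assignment"""
    """with the inherited_to_projects flag on a project."""
    # The second literal above is a bare expression statement, not part of
    # the docstring, so the second half of the sentence is silently lost.

def fixed():
    """Check if user has an inherited project role on project."""

if __name__ == '__main__':
    print(broken.__doc__)  # Checks whether a user has a role assignment
    print(fixed.__doc__)   # Check if user has an inherited project role on project.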
diff --git a/tempest/lib/services/identity/v3/oauth_consumers_client.py b/tempest/lib/services/identity/v3/oauth_consumers_client.py
index 97fb141..7438936 100644
--- a/tempest/lib/services/identity/v3/oauth_consumers_client.py
+++ b/tempest/lib/services/identity/v3/oauth_consumers_client.py
@@ -28,7 +28,7 @@
For more information, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#create-consumer
+ https://docs.openstack.org/api-ref/identity/v3-ext/#create-consumer
"""
post_body = {"description": description}
post_body = json.dumps({'consumer': post_body})
@@ -44,7 +44,7 @@
For more information, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#delete-consumer
+ https://docs.openstack.org/api-ref/identity/v3-ext/#delete-consumer
"""
resp, body = self.delete('OS-OAUTH1/consumers/%s' % consumer_id)
self.expected_success(204, resp.status)
@@ -58,7 +58,7 @@
For more information, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/#update-consumer
+ https://docs.openstack.org/api-ref/identity/v3-ext/#update-consumer
"""
post_body = {"description": description}
post_body = json.dumps({'consumer': post_body})
@@ -75,7 +75,7 @@
For more information, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#show-consumer-details
+ https://docs.openstack.org/api-ref/identity/v3-ext/#show-consumer-details
"""
resp, body = self.get('OS-OAUTH1/consumers/%s' % consumer_id)
self.expected_success(200, resp.status)
@@ -87,7 +87,7 @@
For more information, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#list-consumers
+ https://docs.openstack.org/api-ref/identity/v3-ext/#list-consumers
"""
resp, body = self.get('OS-OAUTH1/consumers')
self.expected_success(200, resp.status)
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index b1d298b..6ca401b 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -74,6 +74,7 @@
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
scheme = scheme.lower()
netloc = netloc.lower()
+ path = path.replace('//', '/')
normalized_uri = urlparse.urlunparse((scheme, netloc, path,
params, '', ''))
@@ -116,7 +117,7 @@
"""Create request token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#create-request-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#create-request-token
"""
endpoint = 'OS-OAUTH1/request_token'
headers = {'Requested-Project-Id': project_id}
@@ -141,7 +142,7 @@
"""Authorize request token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#authorize-request-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#authorize-request-token
"""
roles = [{'id': role_id} for role_id in role_ids]
body = {'roles': roles}
@@ -157,7 +158,7 @@
"""Create access token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#create-access-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#create-access-token
"""
endpoint = 'OS-OAUTH1/access_token'
oauth_params = self._generate_params_with_signature(
@@ -180,7 +181,7 @@
"""Get access token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#get-access-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#get-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s"
% (user_id, access_token_id))
@@ -192,7 +193,7 @@
"""Revoke access token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#revoke-access-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#revoke-access-token
"""
resp, body = self.delete("users/%s/OS-OAUTH1/access_tokens/%s"
% (user_id, access_token_id))
@@ -203,7 +204,7 @@
"""List access tokens.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#list-access-tokens
+ https://docs.openstack.org/api-ref/identity/v3-ext/#list-access-tokens
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens"
% (user_id))
@@ -215,7 +216,7 @@
"""List roles for an access token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#list-roles-for-an-access-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#list-roles-for-an-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s/roles"
% (user_id, access_token_id))
@@ -227,7 +228,7 @@
"""Show role details for an access token.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/#show-role-details-for-an-access-token
+ https://docs.openstack.org/api-ref/identity/v3-ext/#show-role-details-for-an-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s/roles/%s"
% (user_id, access_token_id, role_id))
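The first oauth_token_client hunk above adds path = path.replace('//', '/') to the URI normalization step: OAuth 1.0a signatures are computed over a normalized base-string URI, so a doubled slash in the request path would otherwise make the client's signature disagree with the server's. A self-contained version of that normalization, mirroring the lines visible in the hunk (urllib.parse is used here only to keep the example standalone):

from urllib import parse as urlparse

def normalize_uri(uri):
    # Lower-case the scheme and host, collapse doubled slashes in the path,
    # and drop the query string and fragment before signing.
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
    scheme = scheme.lower()
    netloc = netloc.lower()
    path = path.replace('//', '/')
    return urlparse.urlunparse((scheme, netloc, path, params, '', ''))

if __name__ == '__main__':
    print(normalize_uri(
        'HTTP://Keystone.Example:5000//v3//OS-OAUTH1/request_token?marker=x'))
    # -> http://keystone.example:5000/v3/OS-OAUTH1/request_token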
diff --git a/tempest/lib/services/identity/v3/policies_client.py b/tempest/lib/services/identity/v3/policies_client.py
index ca8dbbd..31c0d18 100644
--- a/tempest/lib/services/identity/v3/policies_client.py
+++ b/tempest/lib/services/identity/v3/policies_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#policies
+https://docs.openstack.org/api-ref/identity/v3/index.html#policies
"""
from oslo_serialization import jsonutils as json
@@ -30,7 +30,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-policy
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-policy
"""
post_body = json.dumps({'policy': kwargs})
resp, body = self.post('policies', post_body)
@@ -58,7 +58,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-policy
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-policy
"""
post_body = json.dumps({'policy': kwargs})
url = 'policies/%s' % policy_id
@@ -79,7 +79,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-endpoint
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-endpoint
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/endpoints/{1}"\
.format(policy_id, endpoint_id)
@@ -91,7 +91,7 @@
"""Get policy association of endpoint.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-endpoint-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-endpoint-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/endpoints/{1}"\
.format(policy_id, endpoint_id)
@@ -103,7 +103,7 @@
"""Delete policy association with endpoint.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-endpoint-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-endpoint-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/endpoints/{1}"\
.format(policy_id, endpoint_id)
@@ -115,7 +115,7 @@
"""Create policy association with service.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-service-type-endpoint
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-service-type-endpoint
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}"\
.format(policy_id, service_id)
@@ -127,7 +127,7 @@
"""Get policy association of service.
API Reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-service-type-endpoint-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-service-type-endpoint-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}"\
.format(policy_id, service_id)
@@ -139,7 +139,7 @@
"""Delete policy association with service.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-service-type-endpoint-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-service-type-endpoint-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}"\
.format(policy_id, service_id)
@@ -152,7 +152,7 @@
"""Create policy association with service and region.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-service-type-endpoint-in-a-region
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#associate-policy-and-service-type-endpoint-in-a-region
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}/regions/{2}"\
.format(policy_id, service_id, region_id)
@@ -165,7 +165,7 @@
"""Get policy association of service and region.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-service-type-endpoint-in-a-region-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#verify-a-policy-and-service-type-endpoint-in-a-region-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}/regions/{2}"\
.format(policy_id, service_id, region_id)
@@ -178,7 +178,7 @@
"""Delete policy association with service and region.
API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-service-type-endpoint-in-a-region-association
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-policy-and-service-type-endpoint-in-a-region-association
"""
url = "policies/{0}/OS-ENDPOINT-POLICY/services/{1}/regions/{2}"\
.format(policy_id, service_id, region_id)
diff --git a/tempest/lib/services/identity/v3/project_tags_client.py b/tempest/lib/services/identity/v3/project_tags_client.py
index dd1a2a5..5562273 100644
--- a/tempest/lib/services/identity/v3/project_tags_client.py
+++ b/tempest/lib/services/identity/v3/project_tags_client.py
@@ -48,7 +48,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/identity/v3/#modify-tag-list-for-a-project
+ https://docs.openstack.org/api-ref/identity/v3/#modify-tag-list-for-a-project
"""
body = {'tags': tags}
if kwargs:
diff --git a/tempest/lib/services/identity/v3/projects_client.py b/tempest/lib/services/identity/v3/projects_client.py
index 20787da..b186fba 100644
--- a/tempest/lib/services/identity/v3/projects_client.py
+++ b/tempest/lib/services/identity/v3/projects_client.py
@@ -27,7 +27,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-project
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-project
"""
# Include the project name to the kwargs parameters
@@ -52,7 +52,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-project
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-project
"""
post_body = json.dumps({'project': kwargs})
diff --git a/tempest/lib/services/identity/v3/regions_client.py b/tempest/lib/services/identity/v3/regions_client.py
index d7507cf..a598c9c 100644
--- a/tempest/lib/services/identity/v3/regions_client.py
+++ b/tempest/lib/services/identity/v3/regions_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#regions
+https://docs.openstack.org/api-ref/identity/v3/index.html#regions
"""
from oslo_serialization import jsonutils as json
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-region
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-region
"""
if region_id is not None:
method = self.put
@@ -50,7 +50,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-region
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-region
"""
post_body = json.dumps({'region': kwargs})
resp, body = self.patch('regions/%s' % region_id, post_body)
diff --git a/tempest/lib/services/identity/v3/role_assignments_client.py b/tempest/lib/services/identity/v3/role_assignments_client.py
index a426e69..51ee8f6 100644
--- a/tempest/lib/services/identity/v3/role_assignments_client.py
+++ b/tempest/lib/services/identity/v3/role_assignments_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-role-assignments
+ https://docs.openstack.org/api-ref/identity/v3/#list-role-assignments
:param effective: If True, returns the effective assignments, including
any assignments gained by virtue of group membership
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index 43e3c01..f9356be 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-role
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-role
"""
post_body = json.dumps({'role': kwargs})
resp, body = self.post('roles', post_body)
@@ -42,8 +42,12 @@
return rest_client.ResponseBody(resp, body)
def list_roles(self, **params):
- """Get the list of Roles."""
+ """Get the list of Roles.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-roles
+ """
url = 'roles'
if params:
url += '?%s' % urllib.urlencode(params)
@@ -57,7 +61,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-role
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-role
"""
post_body = json.dumps({'role': kwargs})
resp, body = self.patch('roles/%s' % role_id, post_body)
@@ -219,7 +223,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#list-all-role-inference-rules
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-all-role-inference-rules
"""
resp, body = self.get('role_inferences')
self.expected_success(200, resp.status)
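The list_roles() change above adds the same reference boilerplate its siblings already carry; like the other list calls in these clients, any keyword filters are simply urlencoded onto the collection URL. A tiny sketch of that URL building, with hypothetical filter names:

from urllib.parse import urlencode

def roles_url(**params):
    # Append optional filters (e.g. name, domain_id) as a query string,
    # the same way the client builds 'roles?...' before issuing the GET.
    url = 'roles'
    if params:
        url += '?%s' % urlencode(params)
    return url

if __name__ == '__main__':
    print(roles_url())              # roles
    print(roles_url(name='admin'))  # roles?name=admin
    print(roles_url(domain_id='default', name='reader'))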
diff --git a/tempest/lib/services/identity/v3/services_client.py b/tempest/lib/services/identity/v3/services_client.py
index 7bbe850..eb961a5 100644
--- a/tempest/lib/services/identity/v3/services_client.py
+++ b/tempest/lib/services/identity/v3/services_client.py
@@ -14,7 +14,7 @@
# under the License.
"""
-https://developer.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
+https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
"""
from oslo_serialization import jsonutils as json
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#update-service
+ https://docs.openstack.org/api-ref/identity/v3/index.html#update-service
"""
patch_body = json.dumps({'service': kwargs})
resp, body = self.patch('services/%s' % service_id, patch_body)
@@ -52,7 +52,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#create-service
+ https://docs.openstack.org/api-ref/identity/v3/index.html#create-service
"""
body = json.dumps({'service': kwargs})
resp, body = self.post("services", body)
@@ -71,7 +71,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-services
+ https://docs.openstack.org/api-ref/identity/v3/#list-services
"""
url = 'services'
if params:
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index d591f03..6956297 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -160,10 +160,8 @@
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
- headers=headers, body=body)
- self._log_request(method, url, resp, req_headers=headers,
- req_body='<omitted>', resp_body=resp_body)
-
+ headers=headers, body=body,
+ log_req_body='<omitted>')
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
diff --git a/tempest/lib/services/identity/v3/trusts_client.py b/tempest/lib/services/identity/v3/trusts_client.py
index d113905..f1cc806 100644
--- a/tempest/lib/services/identity/v3/trusts_client.py
+++ b/tempest/lib/services/identity/v3/trusts_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/index.html#create-trust
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#create-trust
"""
post_body = json.dumps({'trust': kwargs})
resp, body = self.post('OS-TRUST/trusts', post_body)
@@ -45,7 +45,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3-ext/index.html#list-trusts
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-trusts
"""
url = "OS-TRUST/trusts/"
if params:
diff --git a/tempest/lib/services/identity/v3/users_client.py b/tempest/lib/services/identity/v3/users_client.py
index e99a971..f47730f 100644
--- a/tempest/lib/services/identity/v3/users_client.py
+++ b/tempest/lib/services/identity/v3/users_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#create-user
+ https://docs.openstack.org/api-ref/identity/v3/#create-user
"""
post_body = json.dumps({'user': kwargs})
resp, body = self.post('users', post_body)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#update-user
+ https://docs.openstack.org/api-ref/identity/v3/#update-user
"""
if 'id' not in kwargs:
kwargs['id'] = user_id
@@ -54,7 +54,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/index.html#change-password-for-user
+ https://docs.openstack.org/api-ref/identity/v3/index.html#change-password-for-user
"""
update_user = json.dumps({'user': kwargs})
resp, _ = self.post('users/%s/password' % user_id, update_user)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-projects-for-user
+ https://docs.openstack.org/api-ref/identity/v3/#list-projects-for-user
"""
url = 'users/%s/projects' % user_id
if params:
@@ -81,7 +81,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-users
+ https://docs.openstack.org/api-ref/identity/v3/#list-users
"""
url = 'users'
if params:
@@ -109,7 +109,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/identity/v3/#list-groups-to-which-a-user-belongs
+ https://docs.openstack.org/api-ref/identity/v3/#list-groups-to-which-a-user-belongs
"""
url = 'users/%s/groups' % user_id
if params:
diff --git a/tempest/lib/services/identity/v3/versions_client.py b/tempest/lib/services/identity/v3/versions_client.py
index 441ee0d..f3a8986 100644
--- a/tempest/lib/services/identity/v3/versions_client.py
+++ b/tempest/lib/services/identity/v3/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
@@ -26,11 +24,7 @@
"""List API versions"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
self.expected_success(300, resp.status)
diff --git a/tempest/lib/services/image/v1/image_members_client.py b/tempest/lib/services/image/v1/image_members_client.py
index 2318087..7499ec0 100644
--- a/tempest/lib/services/image/v1/image_members_client.py
+++ b/tempest/lib/services/image/v1/image_members_client.py
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v1/#list-shared-images
+ https://docs.openstack.org/api-ref/image/v1/#list-shared-images
"""
url = 'shared-images/%s' % tenant_id
@@ -45,7 +45,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v1/#add-member-to-image
+ https://docs.openstack.org/api-ref/image/v1/#add-member-to-image
"""
url = 'images/%s/members/%s' % (image_id, member_id)
body = json.dumps({'member': kwargs})
@@ -58,7 +58,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v1/#remove-member
+ https://docs.openstack.org/api-ref/image/v1/#remove-member
"""
url = 'images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
diff --git a/tempest/lib/services/image/v1/images_client.py b/tempest/lib/services/image/v1/images_client.py
index 42f8cf2..0e76a63 100644
--- a/tempest/lib/services/image/v1/images_client.py
+++ b/tempest/lib/services/image/v1/images_client.py
@@ -61,7 +61,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v1/index.html#create-image
+ https://docs.openstack.org/api-ref/image/v1/index.html#create-image
"""
if headers is None:
headers = {}
@@ -79,7 +79,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v1/index.html#update-image
+ https://docs.openstack.org/api-ref/image/v1/index.html#update-image
"""
if headers is None:
headers = {}
@@ -104,7 +104,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v1/#list-images
+ https://docs.openstack.org/api-ref/image/v1/#list-images
Most parameters except the following are passed to the API without
any changes.
diff --git a/tempest/lib/services/image/v2/image_members_client.py b/tempest/lib/services/image/v2/image_members_client.py
index e5118a8..65024c7 100644
--- a/tempest/lib/services/image/v2/image_members_client.py
+++ b/tempest/lib/services/image/v2/image_members_client.py
@@ -23,7 +23,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#list-image-members
+ https://docs.openstack.org/api-ref/image/v2/#list-image-members
"""
url = 'images/%s/members' % image_id
resp, body = self.get(url)
@@ -36,7 +36,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#create-image-member
+ https://docs.openstack.org/api-ref/image/v2/#create-image-member
"""
url = 'images/%s/members' % image_id
data = json.dumps(kwargs)
@@ -50,7 +50,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#update-image-member
+ https://docs.openstack.org/api-ref/image/v2/#update-image-member
"""
url = 'images/%s/members/%s' % (image_id, member_id)
data = json.dumps(kwargs)
@@ -64,7 +64,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#show-image-member-details
+ https://docs.openstack.org/api-ref/image/v2/#show-image-member-details
"""
url = 'images/%s/members/%s' % (image_id, member_id)
resp, body = self.get(url)
@@ -76,7 +76,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#delete-image-member
+ https://docs.openstack.org/api-ref/image/v2/#delete-image-member
"""
url = 'images/%s/members/%s' % (image_id, member_id)
resp, _ = self.delete(url)
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 3c38dba..90778da 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/#update-image
+ https://docs.openstack.org/api-ref/image/v2/#update-image
"""
data = json.dumps(patch)
headers = {"Content-Type": "application/openstack-images-v2.0"
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/#create-image
+ https://docs.openstack.org/api-ref/image/v2/#create-image
"""
data = json.dumps(kwargs)
resp, body = self.post('images', data)
@@ -60,7 +60,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#deactivate-image
+ https://docs.openstack.org/api-ref/image/v2/#deactivate-image
"""
url = 'images/%s/actions/deactivate' % image_id
resp, body = self.post(url, None)
@@ -72,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#reactivate-image
+ https://docs.openstack.org/api-ref/image/v2/#reactivate-image
"""
url = 'images/%s/actions/reactivate' % image_id
resp, body = self.post(url, None)
@@ -84,7 +84,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/#delete-image
+ https://docs.openstack.org/api-ref/image/v2/#delete-image
"""
url = 'images/%s' % image_id
resp, _ = self.delete(url)
@@ -96,7 +96,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/#list-images
+ https://docs.openstack.org/api-ref/image/v2/#list-images
"""
url = 'images'
@@ -113,7 +113,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/#show-image
+ https://docs.openstack.org/api-ref/image/v2/#show-image
"""
url = 'images/%s' % image_id
resp, body = self.get(url)
@@ -138,7 +138,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#upload-binary-image-data
+ https://docs.openstack.org/api-ref/image/v2/#upload-binary-image-data
"""
url = 'images/%s/file' % image_id
@@ -157,7 +157,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#download-binary-image-data
+ https://docs.openstack.org/api-ref/image/v2/#download-binary-image-data
"""
url = 'images/%s/file' % image_id
resp, body = self.get(url)
@@ -169,7 +169,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#add-image-tag
+ https://docs.openstack.org/api-ref/image/v2/#add-image-tag
"""
url = 'images/%s/tags/%s' % (image_id, tag)
resp, body = self.put(url, body=None)
@@ -181,7 +181,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/#delete-image-tag
+ https://docs.openstack.org/api-ref/image/v2/#delete-image-tag
"""
url = 'images/%s/tags/%s' % (image_id, tag)
resp, _ = self.delete(url)
diff --git a/tempest/lib/services/image/v2/namespace_objects_client.py b/tempest/lib/services/image/v2/namespace_objects_client.py
index ac2e63e..0cae816 100644
--- a/tempest/lib/services/image/v2/namespace_objects_client.py
+++ b/tempest/lib/services/image/v2/namespace_objects_client.py
@@ -27,7 +27,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-objects
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-objects
"""
url = 'metadefs/namespaces/%s/objects' % namespace
if kwargs:
@@ -42,7 +42,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-object
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-object
"""
url = 'metadefs/namespaces/%s/objects' % namespace
data = json.dumps(kwargs)
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#update-object
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#update-object
"""
url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
data = json.dumps(kwargs)
@@ -70,7 +70,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#show-object
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#show-object
"""
url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
resp, body = self.get(url)
@@ -83,7 +83,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#delete-object
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#delete-object
"""
url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
resp, _ = self.delete(url)
diff --git a/tempest/lib/services/image/v2/namespace_properties_client.py b/tempest/lib/services/image/v2/namespace_properties_client.py
index 1236b2b..5b245ca 100644
--- a/tempest/lib/services/image/v2/namespace_properties_client.py
+++ b/tempest/lib/services/image/v2/namespace_properties_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-properties
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-properties
"""
url = 'metadefs/namespaces/%s/properties' % namespace
resp, body = self.get(url)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-property
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-property
"""
url = 'metadefs/namespaces/%s/properties' % namespace
data = json.dumps(kwargs)
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#show-property-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#show-property-definition
"""
url = 'metadefs/namespaces/%s/properties/%s' % (namespace,
property_name)
@@ -67,7 +67,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#update-property-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#update-property-definition
"""
url = 'metadefs/namespaces/%s/properties/%s' % (namespace,
property_name)
@@ -82,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#remove-property-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#remove-property-definition
"""
url = 'metadefs/namespaces/%s/properties/%s' % (namespace,
property_name)
diff --git a/tempest/lib/services/image/v2/namespace_tags_client.py b/tempest/lib/services/image/v2/namespace_tags_client.py
index 61cc33d..4315f16 100644
--- a/tempest/lib/services/image/v2/namespace_tags_client.py
+++ b/tempest/lib/services/image/v2/namespace_tags_client.py
@@ -27,7 +27,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-tag-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-tag-definition
"""
url = 'metadefs/namespaces/%s/tags/%s' % (namespace,
tag_name)
@@ -41,7 +41,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/image/v2/metadefs-index.html#get-tag-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#get-tag-definition
"""
url = 'metadefs/namespaces/%s/tags/%s' % (namespace,
tag_name)
@@ -55,7 +55,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#update-tag-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#update-tag-definition
"""
url = 'metadefs/namespaces/%s/tags/%s' % (namespace,
tag_name)
@@ -70,7 +70,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#delete-tag-definition
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#delete-tag-definition
"""
url = 'metadefs/namespaces/%s/tags/%s' % (namespace, tag_name)
resp, _ = self.delete(url)
@@ -82,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-tags
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-tags
"""
url = 'metadefs/namespaces/%s/tags' % namespace
data = json.dumps(kwargs)
@@ -96,7 +96,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-tags
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-tags
"""
url = 'metadefs/namespaces/%s/tags' % namespace
if params:
@@ -111,15 +111,11 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#delete-all-tag-definitions
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#delete-all-tag-definitions
"""
url = 'metadefs/namespaces/%s/tags' % namespace
resp, _ = self.delete(url)
- # NOTE(rosmaita): Bug 1656183 fixed the success response code for
- # this call to make it consistent with the other metadefs delete
- # calls. Accept both codes in case tempest is being run against
- # an old Glance.
- self.expected_success([200, 204], resp.status)
+ self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/image/v2/namespaces_client.py b/tempest/lib/services/image/v2/namespaces_client.py
index b00de89..c0fa74a 100644
--- a/tempest/lib/services/image/v2/namespaces_client.py
+++ b/tempest/lib/services/image/v2/namespaces_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-namespace
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-namespace
"""
data = json.dumps(kwargs)
resp, body = self.post('metadefs/namespaces', data)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-namespaces
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-namespaces
"""
url = 'metadefs/namespaces'
resp, body = self.get(url)
@@ -52,7 +52,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#get-namespace-details
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#get-namespace-details
"""
url = 'metadefs/namespaces/%s' % namespace
resp, body = self.get(url)
@@ -65,7 +65,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#update-namespace
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#update-namespace
"""
# NOTE: On Glance API, we need to pass namespace on both URI
# and a request body.
@@ -83,7 +83,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#delete-namespace
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#delete-namespace
"""
url = 'metadefs/namespaces/%s' % namespace
resp, _ = self.delete(url)
diff --git a/tempest/lib/services/image/v2/resource_types_client.py b/tempest/lib/services/image/v2/resource_types_client.py
index 13259d1..ec02f1a 100644
--- a/tempest/lib/services/image/v2/resource_types_client.py
+++ b/tempest/lib/services/image/v2/resource_types_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-resource-types
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-resource-types
"""
url = 'metadefs/resource_types'
resp, body = self.get(url)
@@ -39,7 +39,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-resource-type-association
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-resource-type-association
"""
url = 'metadefs/namespaces/%s/resource_types' % namespace_id
data = json.dumps(kwargs)
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-resource-type-associations
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-resource-type-associations
"""
url = 'metadefs/namespaces/%s/resource_types' % namespace_id
resp, body = self.get(url)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#remove-resource-type-association
+ https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#remove-resource-type-association
"""
url = 'metadefs/namespaces/%s/resource_types/%s' % (namespace_id,
resource_name)
diff --git a/tempest/lib/services/image/v2/versions_client.py b/tempest/lib/services/image/v2/versions_client.py
index 1adc466..1b7f806 100644
--- a/tempest/lib/services/image/v2/versions_client.py
+++ b/tempest/lib/services/image/v2/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
@@ -26,11 +24,7 @@
"""List API versions"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
self.expected_success(300, resp.status)
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index 419e593..f7ac046 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -21,12 +21,16 @@
MeteringLabelsClient
from tempest.lib.services.network.networks_client import NetworksClient
from tempest.lib.services.network.ports_client import PortsClient
+from tempest.lib.services.network.qos_client import QosClient
+from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
+ QosMinimumBandwidthRulesClient
from tempest.lib.services.network.quotas_client import QuotasClient
from tempest.lib.services.network.routers_client import RoutersClient
from tempest.lib.services.network.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.lib.services.network.security_groups_client import \
SecurityGroupsClient
+from tempest.lib.services.network.segments_client import SegmentsClient
from tempest.lib.services.network.service_providers_client import \
ServiceProvidersClient
from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
@@ -37,6 +41,7 @@
__all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
'MeteringLabelRulesClient', 'MeteringLabelsClient',
'NetworksClient', 'NetworkVersionsClient', 'PortsClient',
- 'QuotasClient', 'RoutersClient', 'SecurityGroupRulesClient',
- 'SecurityGroupsClient', 'ServiceProvidersClient',
- 'SubnetpoolsClient', 'SubnetsClient', 'TagsClient']
+ 'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
+ 'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
+ 'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
+ 'SubnetsClient', 'TagsClient']
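With the package now re-exporting the new clients, consumers can import them straight from tempest.lib.services.network. A minimal sketch, not part of the patch, that only verifies the exports (assumes tempest is installed):

    # Hypothetical check: the new client classes should be importable from
    # the package and listed in __all__ after this change.
    from tempest.lib.services import network

    for name in ('QosClient', 'QosMinimumBandwidthRulesClient',
                 'SegmentsClient'):
        assert name in network.__all__
        assert hasattr(network, name)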
diff --git a/tempest/lib/services/network/agents_client.py b/tempest/lib/services/network/agents_client.py
index 5068121..03f2543 100644
--- a/tempest/lib/services/network/agents_client.py
+++ b/tempest/lib/services/network/agents_client.py
@@ -22,7 +22,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#update-agent
+ https://docs.openstack.org/api-ref/network/v2/#update-agent
"""
uri = '/agents/%s' % agent_id
return self.update_resource(uri, kwargs)
@@ -32,17 +32,27 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#show-agent-details
+ https://docs.openstack.org/api-ref/network/v2/#show-agent-details
"""
uri = '/agents/%s' % agent_id
return self.show_resource(uri, **fields)
+ def delete_agent(self, agent_id):
+ """Delete agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-agent
+ """
+ uri = '/agents/%s' % agent_id
+ return self.delete_resource(uri)
+
def list_agents(self, **filters):
"""List all agents.
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#list-all-agents
+ https://docs.openstack.org/api-ref/network/v2/#list-all-agents
"""
uri = '/agents'
return self.list_resources(uri, **filters)
@@ -52,7 +62,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#list-routers-hosted-by-an-l3-agent
+ https://docs.openstack.org/api-ref/network/v2/#list-routers-hosted-by-an-l3-agent
"""
uri = '/agents/%s/l3-routers' % agent_id
return self.list_resources(uri)
@@ -62,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#schedule-router-to-an-l3-agent
+ https://docs.openstack.org/api-ref/network/v2/#schedule-router-to-an-l3-agent
"""
uri = '/agents/%s/l3-routers' % agent_id
return self.create_resource(uri, kwargs, expect_empty_body=True)
@@ -72,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#remove-l3-router-from-an-l3-agent
+ https://docs.openstack.org/api-ref/network/v2/#remove-l3-router-from-an-l3-agent
"""
uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
return self.delete_resource(uri)
@@ -91,7 +101,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/#schedule-a-network-to-a-dhcp-agent
+ https://docs.openstack.org/api-ref/network/v2/#schedule-a-network-to-a-dhcp-agent
"""
uri = '/agents/%s/dhcp-networks' % agent_id
return self.create_resource(uri, kwargs, expect_empty_body=True)
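The new delete_agent call issues DELETE /v2.0/agents/&lt;agent_id&gt; via delete_resource. A hedged usage sketch follows; the credentials, Keystone URL, region and catalog type are placeholders (not taken from the patch), and requests only succeed against a real cloud:

    from tempest.lib import auth
    from tempest.lib.services.network import AgentsClient

    # Placeholder credentials and endpoint for the cloud under test.
    creds = auth.KeystoneV3Credentials(
        username='admin', password='secret', project_name='admin',
        user_domain_name='Default', project_domain_name='Default')
    auth_provider = auth.KeystoneV3AuthProvider(
        creds, auth_url='https://keystone.example.org/identity/v3')

    agents = AgentsClient(auth_provider, 'network', 'RegionOne')

    # Pick an agent from the listing and remove it; delete_resource wraps
    # the (empty) response in a ResponseBody.
    agent_id = agents.list_agents()['agents'][0]['id']
    agents.delete_agent(agent_id)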
diff --git a/tempest/lib/services/network/extensions_client.py b/tempest/lib/services/network/extensions_client.py
index 3910c84..4701cb0 100644
--- a/tempest/lib/services/network/extensions_client.py
+++ b/tempest/lib/services/network/extensions_client.py
@@ -16,9 +16,21 @@
class ExtensionsClient(base.BaseNetworkClient):
def show_extension(self, ext_alias, **fields):
+ """Show extension details.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-extension-details
+ """
uri = '/extensions/%s' % ext_alias
return self.show_resource(uri, **fields)
def list_extensions(self, **filters):
+ """List extensions.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-extensions
+ """
uri = '/extensions'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/floating_ips_client.py b/tempest/lib/services/network/floating_ips_client.py
index 2bb18e0..a63150b 100644
--- a/tempest/lib/services/network/floating_ips_client.py
+++ b/tempest/lib/services/network/floating_ips_client.py
@@ -23,7 +23,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-floating-ip
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-floating-ip
"""
uri = '/floatingips'
post_data = {'floatingip': kwargs}
@@ -34,7 +34,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-floating-ip
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-floating-ip
"""
uri = '/floatingips/%s' % floatingip_id
post_data = {'floatingip': kwargs}
@@ -45,7 +45,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-floating-ip-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-floating-ip-details
"""
uri = '/floatingips/%s' % floatingip_id
return self.show_resource(uri, **fields)
@@ -59,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-floating-ips
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips
"""
uri = '/floatingips'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/metering_label_rules_client.py b/tempest/lib/services/network/metering_label_rules_client.py
index 9542e8f..dd5fd52 100644
--- a/tempest/lib/services/network/metering_label_rules_client.py
+++ b/tempest/lib/services/network/metering_label_rules_client.py
@@ -20,17 +20,24 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/index.html#create-metering-label-rule
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-metering-label-rule
"""
uri = '/metering/metering-label-rules'
post_data = {'metering_label_rule': kwargs}
return self.create_resource(uri, post_data)
def show_metering_label_rule(self, metering_label_rule_id, **fields):
+ """Show metering label rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-metering-label-rule-details
+ """
uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
return self.show_resource(uri, **fields)
def delete_metering_label_rule(self, metering_label_rule_id):
+ """Delete metering label rule."""
uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
return self.delete_resource(uri)
@@ -39,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/index.html#list-metering-label-rules
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-metering-label-rules
"""
uri = '/metering/metering-label-rules'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/metering_labels_client.py b/tempest/lib/services/network/metering_labels_client.py
index 411da1f..80ed51f 100644
--- a/tempest/lib/services/network/metering_labels_client.py
+++ b/tempest/lib/services/network/metering_labels_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-metering-label
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-metering-label
"""
uri = '/metering/metering-labels'
post_data = {'metering_label': kwargs}
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-metering-label-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-metering-label-details
"""
uri = '/metering/metering-labels/%s' % metering_label_id
return self.show_resource(uri, **fields)
@@ -41,7 +41,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#delete-metering-label
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-metering-label
"""
uri = '/metering/metering-labels/%s' % metering_label_id
return self.delete_resource(uri)
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-metering-labels
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-metering-labels
"""
uri = '/metering/metering-labels'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/networks_client.py b/tempest/lib/services/network/networks_client.py
index 77d4823..f0e735e 100644
--- a/tempest/lib/services/network/networks_client.py
+++ b/tempest/lib/services/network/networks_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-network
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-network
"""
uri = '/networks'
post_data = {'network': kwargs}
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-network
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-network
"""
uri = '/networks/%s' % network_id
post_data = {'network': kwargs}
@@ -42,7 +42,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-network-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-network-details
"""
uri = '/networks/%s' % network_id
return self.show_resource(uri, **fields)
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-networks
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-networks
"""
uri = '/networks'
return self.list_resources(uri, **filters)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#bulk-create-networks
+ https://docs.openstack.org/api-ref/network/v2/index.html#bulk-create-networks
"""
uri = '/networks'
return self.create_resource(uri, kwargs)
diff --git a/tempest/lib/services/network/ports_client.py b/tempest/lib/services/network/ports_client.py
index daa15d7..d77b62f 100644
--- a/tempest/lib/services/network/ports_client.py
+++ b/tempest/lib/services/network/ports_client.py
@@ -21,7 +21,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-port
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-port
"""
uri = '/ports'
post_data = {'port': kwargs}
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-port
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-port
"""
uri = '/ports/%s' % port_id
post_data = {'port': kwargs}
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-port-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-port-details
"""
uri = '/ports/%s' % port_id
return self.show_resource(uri, **fields)
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#delete-port
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-port
"""
uri = '/ports/%s' % port_id
return self.delete_resource(uri)
@@ -63,7 +63,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-ports
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-ports
"""
uri = '/ports'
return self.list_resources(uri, **filters)
@@ -73,7 +73,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#bulk-create-ports
+ https://docs.openstack.org/api-ref/network/v2/index.html#bulk-create-ports
"""
uri = '/ports'
return self.create_resource(uri, kwargs)
diff --git a/tempest/lib/services/network/qos_client.py b/tempest/lib/services/network/qos_client.py
new file mode 100644
index 0000000..47dc70a
--- /dev/null
+++ b/tempest/lib/services/network/qos_client.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosClient(base.BaseNetworkClient):
+
+ def create_qos_policy(self, **kwargs):
+ """Creates a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-qos-policy
+ """
+ uri = '/qos/policies'
+ post_data = {'policy': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_qos_policy(self, qos_policy_id, **kwargs):
+ """Updates a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-qos-policy
+ """
+ uri = '/qos/policies/%s' % qos_policy_id
+ post_data = {'policy': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_qos_policy(self, qos_policy_id, **fields):
+ """Show details of a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-qos-policy-details
+ """
+ uri = '/qos/policies/%s' % qos_policy_id
+ return self.show_resource(uri, **fields)
+
+ def delete_qos_policy(self, qos_policy_id):
+ """Deletes a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-qos-policy
+ """
+ uri = '/qos/policies/%s' % qos_policy_id
+ return self.delete_resource(uri)
+
+ def list_qos_policies(self, **filters):
+ """Lists QoS policies.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-qos-policies
+ """
+ uri = '/qos/policies'
+ return self.list_resources(uri, **filters)
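A hedged lifecycle sketch for the new QosClient, reusing the auth_provider built in the AgentsClient sketch above; the policy attributes (name, shared, description) follow the Neutron QoS API and are illustrative only:

    from tempest.lib.services.network import QosClient

    qos = QosClient(auth_provider, 'network', 'RegionOne')

    # kwargs are wrapped as {'policy': {...}} and POSTed to /qos/policies.
    policy = qos.create_qos_policy(name='bw-limit', shared=False)['policy']

    # Read it back, tweak it, then clean up.
    qos.show_qos_policy(policy['id'])
    qos.update_qos_policy(policy['id'], description='limit egress bandwidth')
    qos.delete_qos_policy(policy['id'])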
diff --git a/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py b/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py
new file mode 100644
index 0000000..dd9f45f
--- /dev/null
+++ b/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosMinimumBandwidthRulesClient(base.BaseNetworkClient):
+
+ def create_minimum_bandwidth_rule(self, qos_policy_id, **kwargs):
+ """Creates a minimum bandwidth rule for a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-minimum-bandwidth-rule
+ """
+ uri = '/qos/policies/%s/minimum_bandwidth_rules' % qos_policy_id
+ post_data = {'minimum_bandwidth_rule': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_minimum_bandwidth_rule(self, qos_policy_id, rule_id, **kwargs):
+ """Updates a minimum bandwidth rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-minimum-bandwidth-rule
+ """
+ uri = '/qos/policies/%s/minimum_bandwidth_rules/%s' % (
+ qos_policy_id, rule_id)
+ post_data = {'minimum_bandwidth_rule': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_minimum_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
+ """Show details of a minimum bandwidth rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-minimum-bandwidth-rule-details
+ """
+ uri = '/qos/policies/%s/minimum_bandwidth_rules/%s' % (
+ qos_policy_id, rule_id)
+ return self.show_resource(uri, **fields)
+
+ def delete_minimum_bandwidth_rule(self, qos_policy_id, rule_id):
+ """Deletes a minimum bandwidth rule for a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-minimum-bandwidth-rule
+ """
+ uri = '/qos/policies/%s/minimum_bandwidth_rules/%s' % (
+ qos_policy_id, rule_id)
+ return self.delete_resource(uri)
+
+ def list_minimum_bandwidth_rules(self, qos_policy_id, **filters):
+ """Lists all minimum bandwidth rules for a QoS policy.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-minimum-bandwidth-rules-for-qos-policy
+ """
+ uri = '/qos/policies/%s/minimum_bandwidth_rules' % qos_policy_id
+ return self.list_resources(uri, **filters)
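A companion sketch for the rules client, chained to the policy created in the previous sketch; min_kbps and direction are the usual Neutron minimum-bandwidth rule fields and are shown here as an assumption:

    from tempest.lib.services.network import QosMinimumBandwidthRulesClient

    rules = QosMinimumBandwidthRulesClient(auth_provider, 'network',
                                           'RegionOne')

    # POST /qos/policies/<policy_id>/minimum_bandwidth_rules with kwargs
    # wrapped as {'minimum_bandwidth_rule': {...}}.
    rule = rules.create_minimum_bandwidth_rule(
        policy['id'], min_kbps=1000, direction='egress')
    rule_id = rule['minimum_bandwidth_rule']['id']

    rules.show_minimum_bandwidth_rule(policy['id'], rule_id)
    rules.list_minimum_bandwidth_rules(policy['id'])
    rules.delete_minimum_bandwidth_rule(policy['id'], rule_id)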
diff --git a/tempest/lib/services/network/quotas_client.py b/tempest/lib/services/network/quotas_client.py
index e9666de..997d201 100644
--- a/tempest/lib/services/network/quotas_client.py
+++ b/tempest/lib/services/network/quotas_client.py
@@ -22,7 +22,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/index.html#update-quota-for-a-project
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-quota-for-a-project
"""
put_body = {'quota': kwargs}
uri = '/quotas/%s' % tenant_id
@@ -35,10 +35,22 @@
return self.delete_resource(uri)
def show_quotas(self, tenant_id, **fields):
+ """Show quota for a project.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-quotas-for-a-project
+ """
uri = '/quotas/%s' % tenant_id
return self.show_resource(uri, **fields)
def list_quotas(self, **filters):
+ """List quotas for projects with non default quota values.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-quotas-for-projects-with-non-default-quota-values
+ """
uri = '/quotas'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/routers_client.py b/tempest/lib/services/network/routers_client.py
index 19b7627..2af487f 100644
--- a/tempest/lib/services/network/routers_client.py
+++ b/tempest/lib/services/network/routers_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-router
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-router
"""
post_body = {'router': kwargs}
uri = '/routers'
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-router
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-router
"""
uri = '/routers/%s' % router_id
update_body = {'router': kwargs}
@@ -42,7 +42,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-router-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-router-details
"""
uri = '/routers/%s' % router_id
return self.show_resource(uri, **fields)
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-routers
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-routers
"""
uri = '/routers'
return self.list_resources(uri, **filters)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#add-interface-to-router
+ https://docs.openstack.org/api-ref/network/v2/index.html#add-interface-to-router
"""
uri = '/routers/%s/add_router_interface' % router_id
return self.update_resource(uri, kwargs)
@@ -76,7 +76,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#remove-interface-from-router
+ https://docs.openstack.org/api-ref/network/v2/index.html#remove-interface-from-router
"""
uri = '/routers/%s/remove_router_interface' % router_id
return self.update_resource(uri, kwargs)
diff --git a/tempest/lib/services/network/security_group_rules_client.py b/tempest/lib/services/network/security_group_rules_client.py
index d2bc4a9..d62b05f 100644
--- a/tempest/lib/services/network/security_group_rules_client.py
+++ b/tempest/lib/services/network/security_group_rules_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-security-group-rule
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-security-group-rule
"""
uri = '/security-group-rules'
post_data = {'security_group_rule': kwargs}
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-security-group-rule
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-security-group-rule
"""
uri = '/security-group-rules/%s' % security_group_rule_id
return self.show_resource(uri, **fields)
@@ -45,7 +45,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-security-group-rules
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-security-group-rules
"""
uri = '/security-group-rules'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/security_groups_client.py b/tempest/lib/services/network/security_groups_client.py
index d3ebf20..b641839 100644
--- a/tempest/lib/services/network/security_groups_client.py
+++ b/tempest/lib/services/network/security_groups_client.py
@@ -21,7 +21,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-security-group
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-security-group
"""
uri = '/security-groups'
post_data = {'security_group': kwargs}
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-security-group
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-security-group
"""
uri = '/security-groups/%s' % security_group_id
post_data = {'security_group': kwargs}
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-security-group
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-security-group
"""
uri = '/security-groups/%s' % security_group_id
return self.show_resource(uri, **fields)
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#delete-security-group
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-security-group
"""
uri = '/security-groups/%s' % security_group_id
return self.delete_resource(uri)
@@ -63,7 +63,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-security-groups
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-security-groups
"""
uri = '/security-groups'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/segments_client.py b/tempest/lib/services/network/segments_client.py
new file mode 100644
index 0000000..30e6b23
--- /dev/null
+++ b/tempest/lib/services/network/segments_client.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class SegmentsClient(base.BaseNetworkClient):
+
+ def create_segment(self, **kwargs):
+ """Creates a segment.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-segment
+ """
+ uri = '/segments'
+ post_data = {'segment': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_segment(self, segment_id, **kwargs):
+ """Updates a segment.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-segment
+ """
+ uri = '/segments/%s' % segment_id
+ post_data = {'segment': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_segment(self, segment_id, **fields):
+ """Shows details of a segment.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-segment-details
+ """
+ uri = '/segments/%s' % segment_id
+ return self.show_resource(uri, **fields)
+
+ def delete_segment(self, segment_id):
+ """Deletes a segment"""
+ uri = '/segments/%s' % segment_id
+ return self.delete_resource(uri)
+
+ def list_segments(self, **filters):
+ """Lists segments.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-segments
+ """
+ uri = '/segments'
+ return self.list_resources(uri, **filters)
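A hedged sketch for the new SegmentsClient, again reusing the auth_provider from the earlier sketch; the segment attributes mirror the Neutron segments API (network_id, network_type, physical_network, segmentation_id) and every value below is a placeholder rather than something from the patch:

    from tempest.lib.services.network import SegmentsClient

    segments = SegmentsClient(auth_provider, 'network', 'RegionOne')

    network_id = 'NETWORK_ID'  # placeholder UUID of an existing network

    # kwargs are wrapped as {'segment': {...}} and POSTed to /segments.
    segment = segments.create_segment(
        network_id=network_id, network_type='vlan',
        physical_network='physnet1', segmentation_id=2016)['segment']

    segments.show_segment(segment['id'])
    segments.list_segments(network_id=network_id)
    segments.delete_segment(segment['id'])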
diff --git a/tempest/lib/services/network/service_providers_client.py b/tempest/lib/services/network/service_providers_client.py
index 01313a0..4ce617f 100644
--- a/tempest/lib/services/network/service_providers_client.py
+++ b/tempest/lib/services/network/service_providers_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/network/v2/index.html#list-service-providers
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-service-providers
"""
uri = '/service-providers'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/subnetpools_client.py b/tempest/lib/services/network/subnetpools_client.py
index 7e77e30..635c449 100644
--- a/tempest/lib/services/network/subnetpools_client.py
+++ b/tempest/lib/services/network/subnetpools_client.py
@@ -22,7 +22,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-subnet-pools
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-subnet-pools
"""
uri = '/subnetpools'
return self.list_resources(uri, **filters)
@@ -32,7 +32,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-subnet-pool
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-subnet-pool
"""
uri = '/subnetpools'
post_data = {'subnetpool': kwargs}
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-subnet-pool
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-subnet-pool
"""
uri = '/subnetpools/%s' % subnetpool_id
return self.show_resource(uri, **fields)
@@ -53,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-subnet-pool
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-subnet-pool
"""
uri = '/subnetpools/%s' % subnetpool_id
post_data = {'subnetpool': kwargs}
diff --git a/tempest/lib/services/network/subnets_client.py b/tempest/lib/services/network/subnets_client.py
index b843f84..00dd423 100644
--- a/tempest/lib/services/network/subnets_client.py
+++ b/tempest/lib/services/network/subnets_client.py
@@ -20,7 +20,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#create-subnet
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-subnet
"""
uri = '/subnets'
post_data = {'subnet': kwargs}
@@ -31,7 +31,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#update-subnet
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-subnet
"""
uri = '/subnets/%s' % subnet_id
post_data = {'subnet': kwargs}
@@ -42,7 +42,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#show-subnet-details
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-subnet-details
"""
uri = '/subnets/%s' % subnet_id
return self.show_resource(uri, **fields)
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#list-subnets
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-subnets
"""
uri = '/subnets'
return self.list_resources(uri, **filters)
@@ -66,7 +66,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#bulk-create-subnet
+ https://docs.openstack.org/api-ref/network/v2/index.html#bulk-create-subnet
"""
uri = '/subnets'
return self.create_resource(uri, kwargs)
diff --git a/tempest/lib/services/network/tags_client.py b/tempest/lib/services/network/tags_client.py
index 5d49a79..71e17c7 100644
--- a/tempest/lib/services/network/tags_client.py
+++ b/tempest/lib/services/network/tags_client.py
@@ -25,7 +25,7 @@
"""Adds a tag on the resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#add-a-tag
+ https://docs.openstack.org/api-ref/network/v2/index.html#add-a-tag
"""
uri = '/%s/%s/tags/%s' % (resource_type, resource_id, tag)
return self.update_resource(
@@ -36,7 +36,7 @@
"""Confirm that a given tag is set on the resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#confirm-a-tag
+ https://docs.openstack.org/api-ref/network/v2/index.html#confirm-a-tag
"""
# TODO(felipemonteiro): Use the "check_resource" method in
# ``BaseNetworkClient`` once it has been implemented.
@@ -50,7 +50,7 @@
"""Replace all tags on the resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#replace-all-tags
+ https://docs.openstack.org/api-ref/network/v2/index.html#replace-all-tags
"""
uri = '/%s/%s/tags' % (resource_type, resource_id)
put_body = {"tags": tags}
@@ -60,7 +60,7 @@
"""Removes a tag on the resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#remove-a-tag
+ https://docs.openstack.org/api-ref/network/v2/index.html#remove-a-tag
"""
uri = '/%s/%s/tags/%s' % (resource_type, resource_id, tag)
return self.delete_resource(uri)
@@ -69,7 +69,7 @@
"""Removes all tags on the resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#remove-all-tags
+ https://docs.openstack.org/api-ref/network/v2/index.html#remove-all-tags
"""
uri = '/%s/%s/tags' % (resource_type, resource_id)
return self.delete_resource(uri)
@@ -78,7 +78,7 @@
"""Retrieves the tags for a resource.
For more information, please refer to the official API reference:
- http://developer.openstack.org/api-ref/networking/v2/index.html#obtain-tag-list
+ https://docs.openstack.org/api-ref/network/v2/index.html#obtain-tag-list
"""
uri = '/%s/%s/tags' % (resource_type, resource_id)
return self.list_resources(uri)
diff --git a/tempest/lib/services/network/versions_client.py b/tempest/lib/services/network/versions_client.py
index f87fe87..5aa7964 100644
--- a/tempest/lib/services/network/versions_client.py
+++ b/tempest/lib/services/network/versions_client.py
@@ -12,32 +12,36 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
+from tempest.lib.common import rest_client
from tempest.lib.services.network import base
class NetworkVersionsClient(base.BaseNetworkClient):
def list_versions(self):
- """Do a GET / to fetch available API version information."""
+ """Do a GET / to fetch available API version information.
- version_url = self._get_base_version_url()
+ For more information, please refer to the official API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-api-versions
+ """
- # Note: we do a raw_request here because we want to use
+ # Note: we do a self.get('/') here because we want to use
# an unversioned URL, not "v2/$project_id/".
- # Since raw_request doesn't log anything, we do that too.
- start = time.time()
- self._log_request_start('GET', version_url)
- response, body = self.raw_request(version_url, 'GET')
- self._error_checker(response, body)
- end = time.time()
- self._log_request('GET', version_url, response,
- secs=(end - start), resp_body=body)
-
- self.response_checker('GET', response, body)
- self.expected_success(200, response.status)
+ resp, body = self.get('/')
body = json.loads(body)
- return body
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_version(self, version):
+ """Do a GET /<version> to fetch available resources.
+
+ For more information, please refer to the official API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-api-v2-details
+ """
+
+ resp, body = self.get(version + '/')
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
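With list_versions() now routed through self.get('/'), the standard request logging, error checking and ResponseBody wrapping apply, and show_version() adds a per-version GET. A minimal sketch of how both calls are typically driven (auth setup omitted; the version string is a placeholder):

    # Minimal sketch; auth_provider setup is omitted.
    from tempest.lib.services.network import versions_client

    auth_provider = ...  # an already-configured tempest.lib auth provider
    client = versions_client.NetworkVersionsClient(
        auth_provider, 'network', 'regionOne')

    versions = client.list_versions()      # GET /   (unversioned root)
    for version in versions['versions']:
        print(version.get('id'), version.get('status'))

    details = client.show_version('v2.0')  # GET /v2.0/
    # Both calls return rest_client.ResponseBody, a dict-like object that
    # also carries the raw response.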
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 6b097c1..8c15a88 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -34,7 +34,7 @@
Account Metadata can be created, updated or deleted based on
metadata header or value. For detailed info, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-account-metadata
+ https://docs.openstack.org/api-ref/object-store/#create-update-or-delete-account-metadata
"""
headers = {}
if create_update_metadata:
@@ -63,7 +63,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/object-store/#show-account-details-and-list-containers
+ https://docs.openstack.org/api-ref/object-store/#show-account-details-and-list-containers
"""
url = '?%s' % urllib.urlencode(params) if params else ''
diff --git a/tempest/lib/services/object_storage/capabilities_client.py b/tempest/lib/services/object_storage/capabilities_client.py
index d31bbc2..f08bd9a 100644
--- a/tempest/lib/services/object_storage/capabilities_client.py
+++ b/tempest/lib/services/object_storage/capabilities_client.py
@@ -21,9 +21,10 @@
class CapabilitiesClient(rest_client.RestClient):
def list_capabilities(self):
- self.skip_path()
try:
- resp, body = self.get('info')
+ url = self._get_base_version_url() + 'info'
+ resp, body = self.raw_request(url, 'GET')
+ self._error_checker(resp, body)
finally:
self.reset_path()
body = json.loads(body)
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 430e0d4..027fb1f 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -30,7 +30,7 @@
with optional metadata passed in as a dictionary.
Full list of allowed headers or value, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/object-store/#create-container
+ https://docs.openstack.org/api-ref/object-store/#create-container
"""
url = str(container_name)
@@ -61,7 +61,7 @@
Container Metadata can be created, updated or deleted based on
metadata header or value. For detailed info, please refer to the
official API reference:
- https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-container-metadata
+ https://docs.openstack.org/api-ref/object-store/#create-update-or-delete-container-metadata
"""
url = str(container_name)
headers = {}
@@ -97,7 +97,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/object-store/#show-container-details-and-list-objects
+ https://docs.openstack.org/api-ref/object-store/#show-container-details-and-list-objects
"""
url = str(container_name)
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
new file mode 100644
index 0000000..5c20c57
--- /dev/null
+++ b/tempest/lib/services/placement/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement.placement_client import \
+ PlacementClient
+
+__all__ = ['PlacementClient']
diff --git a/tempest/lib/services/placement/base_placement_client.py b/tempest/lib/services/placement/base_placement_client.py
new file mode 100644
index 0000000..505a515
--- /dev/null
+++ b/tempest/lib/services/placement/base_placement_client.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common import api_version_utils
+from tempest.lib.common import rest_client
+
+PLACEMENT_MICROVERSION = None
+
+
+class BasePlacementClient(rest_client.RestClient):
+
+ api_microversion_header_name = 'OpenStack-API-Version'
+ version_header_value = 'placement %s'
+
+ def get_headers(self):
+ headers = super(BasePlacementClient, self).get_headers()
+ if PLACEMENT_MICROVERSION:
+ headers[self.api_microversion_header_name] = \
+ self.version_header_value % PLACEMENT_MICROVERSION
+ return headers
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, chunked=False):
+ resp, resp_body = super(BasePlacementClient, self).request(
+ method, url, extra_headers, headers, body, chunked)
+ if (PLACEMENT_MICROVERSION and
+ PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
+ api_version_utils.assert_version_header_matches_request(
+ self.api_microversion_header_name,
+ self.version_header_value % PLACEMENT_MICROVERSION,
+ resp)
+ return resp, resp_body
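The base client centralizes placement microversion handling: when the module-level PLACEMENT_MICROVERSION is set, every request carries an 'OpenStack-API-Version: placement <version>' header, and unless 'latest' was requested the same value is asserted back from the response headers. A small sketch of that behaviour (the microversion value and auth setup are placeholders):

    # Sketch of the microversion header behaviour; '1.10' is an arbitrary
    # example value and auth_provider setup is omitted.
    from tempest.lib.services.placement import base_placement_client

    base_placement_client.PLACEMENT_MICROVERSION = '1.10'

    auth_provider = ...  # an already-configured tempest.lib auth provider
    client = base_placement_client.BasePlacementClient(
        auth_provider, 'placement', 'regionOne')

    headers = client.get_headers()
    assert headers['OpenStack-API-Version'] == 'placement 1.10'
    # request() will additionally call
    # assert_version_header_matches_request() on each response.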
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
new file mode 100644
index 0000000..b8e91b8
--- /dev/null
+++ b/tempest/lib/services/placement/placement_client.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class PlacementClient(base_placement_client.BasePlacementClient):
+
+ def list_allocation_candidates(self, **params):
+ """List allocation candidates.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#list-allocation-candidates
+ """
+ url = '/allocation_candidates'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_allocations(self, consumer_uuid):
+ """List all allocation records for the consumer.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#list-allocations
+ """
+ url = '/allocations/%s' % consumer_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
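A short usage sketch for the two new placement calls; the resources string follows the placement API's 'RESOURCE_CLASS:amount' query format and the consumer UUID is a placeholder (auth setup omitted):

    # Usage sketch; identifiers and auth setup are placeholders.
    from tempest.lib.services.placement import placement_client

    auth_provider = ...  # an already-configured tempest.lib auth provider
    placement = placement_client.PlacementClient(
        auth_provider, 'placement', 'regionOne')

    # GET /allocation_candidates?resources=VCPU:1,MEMORY_MB:512
    candidates = placement.list_allocation_candidates(
        resources='VCPU:1,MEMORY_MB:512')

    # GET /allocations/<consumer_uuid> (e.g. a server id)
    allocations = placement.list_allocations('<consumer-uuid>')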
diff --git a/tempest/lib/services/volume/v1/backups_client.py b/tempest/lib/services/volume/v1/backups_client.py
index 77c40b3..2289253 100644
--- a/tempest/lib/services/volume/v1/backups_client.py
+++ b/tempest/lib/services/volume/v1/backups_client.py
@@ -28,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#create-backup
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-backup
"""
post_body = json.dumps({'backup': kwargs})
resp, body = self.post('backups', post_body)
@@ -41,7 +41,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#restore-backup
+ https://docs.openstack.org/api-ref/block-storage/v2/#restore-backup
"""
post_body = json.dumps({'restore': kwargs})
resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
@@ -83,6 +83,9 @@
def import_backup(self, **kwargs):
"""Import backup metadata record."""
+ # TODO(linanbj): The api-site does not document this API yet. Once it
+ # does, add the link to the api-site description here as well.
post_body = json.dumps({'backup-record': kwargs})
resp, body = self.post("backups/import_record", post_body)
body = json.loads(body)
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
index 1fde79f..8e75ff9 100644
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -51,7 +51,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
"""
url = "/types/%s/encryption" % volume_type_id
post_body = json.dumps({'encryption': kwargs})
diff --git a/tempest/lib/services/volume/v1/hosts_client.py b/tempest/lib/services/volume/v1/hosts_client.py
index 9b19b84..f344678 100644
--- a/tempest/lib/services/volume/v1/hosts_client.py
+++ b/tempest/lib/services/volume/v1/hosts_client.py
@@ -27,7 +27,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#list-all-hosts
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-all-hosts
"""
url = 'os-hosts'
if params:
diff --git a/tempest/lib/services/volume/v1/qos_client.py b/tempest/lib/services/volume/v1/qos_client.py
index 593bddd..67f2ead 100644
--- a/tempest/lib/services/volume/v1/qos_client.py
+++ b/tempest/lib/services/volume/v1/qos_client.py
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#create-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-qos-specification
"""
post_body = json.dumps({'qos_specs': kwargs})
resp, body = self.post('qos-specs', post_body)
@@ -79,7 +79,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#set-keys-in-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v2/#set-keys-in-qos-specification
"""
put_body = json.dumps({"qos_specs": kwargs})
resp, body = self.put('qos-specs/%s' % qos_id, put_body)
@@ -94,7 +94,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
diff --git a/tempest/lib/services/volume/v1/quotas_client.py b/tempest/lib/services/volume/v1/quotas_client.py
index 84f34f2..7f191ca 100644
--- a/tempest/lib/services/volume/v1/quotas_client.py
+++ b/tempest/lib/services/volume/v1/quotas_client.py
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-quotas
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-quotas
"""
put_body = jsonutils.dumps({'quota_set': kwargs})
resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
diff --git a/tempest/lib/services/volume/v1/snapshots_client.py b/tempest/lib/services/volume/v1/snapshots_client.py
index 51f7b9b..7dfdcf2 100644
--- a/tempest/lib/services/volume/v1/snapshots_client.py
+++ b/tempest/lib/services/volume/v1/snapshots_client.py
@@ -27,8 +27,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots
- https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
"""
url = 'snapshots'
if detail:
@@ -46,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
+ https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
"""
url = "snapshots/%s" % snapshot_id
resp, body = self.get(url)
@@ -59,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#create-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-snapshot
"""
post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
@@ -72,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#delete-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v2/#delete-snapshot
"""
resp, body = self.delete("snapshots/%s" % snapshot_id)
self.expected_success(202, resp.status)
@@ -124,7 +124,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot
"""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
@@ -137,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
+ https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
"""
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.get(url)
@@ -150,7 +150,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % snapshot_id
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
index 58a80b7..d434e65 100644
--- a/tempest/lib/services/volume/v1/types_client.py
+++ b/tempest/lib/services/volume/v1/types_client.py
@@ -40,7 +40,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
"""
url = 'types'
if params:
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
+ https://docs.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
"""
url = "types/%s" % volume_type_id
resp, body = self.get(url)
@@ -69,7 +69,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
"""
post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
@@ -82,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#delete-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v2/#delete-volume-type
"""
resp, body = self.delete("types/%s" % volume_type_id)
self.expected_success(202, resp.status)
@@ -137,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-volume-type
"""
put_body = json.dumps({'volume_type': kwargs})
resp, body = self.put('types/%s' % volume_type_id, put_body)
@@ -149,13 +149,14 @@
extra_specs):
"""Update a volume_type extra spec.
- volume_type_id: Id of volume_type.
- extra_spec_name: Name of the extra spec to be updated.
- extra_spec: A dictionary of with key as extra_spec_name and the
- updated value.
+ :param volume_type_id: Id of volume_type.
+ :param extra_spec_name: Name of the extra spec to be updated.
+ :param extra_specs: A dictionary with extra_spec_name as the key and
+ the updated value as the value.
+
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
"""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
put_body = json.dumps(extra_specs)
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 0e6ea9f..4ed5eb1 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -41,8 +41,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes
- https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
"""
url = 'volumes'
if detail:
@@ -68,7 +68,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#create-volume
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
@@ -81,7 +81,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#update-volume
+ https://docs.openstack.org/api-ref/block-storage/v2/#update-volume
"""
put_body = json.dumps({'volume': kwargs})
resp, body = self.put('volumes/%s' % volume_id, put_body)
@@ -109,7 +109,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
+ https://docs.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
"""
post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -166,7 +166,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#extend-volume-size
+ https://docs.openstack.org/api-ref/block-storage/v2/#extend-volume-size
"""
post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -179,7 +179,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
+ https://docs.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
"""
post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
@@ -191,7 +191,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
+ https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
"""
post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
@@ -212,7 +212,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
+ https://docs.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
"""
url = 'os-volume-transfer'
if params:
@@ -233,7 +233,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
+ https://docs.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
"""
url = 'os-volume-transfer/%s/accept' % transfer_id
post_body = json.dumps({'accept': kwargs})
diff --git a/tempest/lib/services/volume/v3/__init__.py b/tempest/lib/services/volume/v3/__init__.py
index a1b7de3..e2fa836 100644
--- a/tempest/lib/services/volume/v3/__init__.py
+++ b/tempest/lib/services/volume/v3/__init__.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
+from tempest.lib.services.volume.v3.attachments_client import AttachmentsClient
from tempest.lib.services.volume.v3.availability_zone_client \
import AvailabilityZoneClient
from tempest.lib.services.volume.v3.backups_client import BackupsClient
@@ -43,12 +44,11 @@
from tempest.lib.services.volume.v3.volume_manage_client import \
VolumeManageClient
from tempest.lib.services.volume.v3.volumes_client import VolumesClient
-
-__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'BaseClient',
- 'CapabilitiesClient', 'EncryptionTypesClient', 'ExtensionsClient',
- 'GroupSnapshotsClient', 'GroupTypesClient', 'GroupsClient',
- 'HostsClient', 'LimitsClient', 'MessagesClient', 'QosSpecsClient',
- 'QuotaClassesClient', 'QuotasClient', 'SchedulerStatsClient',
- 'ServicesClient', 'SnapshotManageClient', 'SnapshotsClient',
- 'TransfersClient', 'TypesClient', 'VersionsClient',
- 'VolumeManageClient', 'VolumesClient']
+__all__ = ['AttachmentsClient', 'AvailabilityZoneClient', 'BackupsClient',
+ 'BaseClient', 'CapabilitiesClient', 'EncryptionTypesClient',
+ 'ExtensionsClient', 'GroupSnapshotsClient', 'GroupTypesClient',
+ 'GroupsClient', 'HostsClient', 'LimitsClient', 'MessagesClient',
+ 'QosSpecsClient', 'QuotaClassesClient', 'QuotasClient',
+ 'SchedulerStatsClient', 'ServicesClient', 'SnapshotManageClient',
+ 'SnapshotsClient', 'TransfersClient', 'TypesClient',
+ 'VersionsClient', 'VolumeManageClient', 'VolumesClient']
diff --git a/tempest/lib/services/volume/v3/attachments_client.py b/tempest/lib/services/volume/v3/attachments_client.py
new file mode 100644
index 0000000..5e448f7
--- /dev/null
+++ b/tempest/lib/services/volume/v3/attachments_client.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
+
+
+class AttachmentsClient(base_client.BaseClient):
+ """Client class to send CRUD attachment V3 API requests"""
+
+ def show_attachment(self, attachment_id):
+ """Show volume attachment."""
+ url = "attachments/%s" % (attachment_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
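The attachments client currently exposes only the show call. A usage sketch, with the service catalog type, attachment id and response key taken from block-storage API conventions rather than from this change (auth setup omitted):

    # Usage sketch; 'volumev3' and the attachment id are assumptions.
    from tempest.lib.services.volume.v3 import attachments_client

    auth_provider = ...  # an already-configured tempest.lib auth provider
    client = attachments_client.AttachmentsClient(
        auth_provider, 'volumev3', 'regionOne')

    # GET /attachments/<attachment_id>; the id usually comes from the
    # volume's 'attachments' list after an attach operation.
    body = client.show_attachment('<attachment-uuid>')
    attachment = body['attachment']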
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index f2d2d21..970471e 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -29,7 +29,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-backup
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-backup
"""
post_body = json.dumps({'backup': kwargs})
resp, body = self.post('backups', post_body)
@@ -42,7 +42,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-a-backup
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-a-backup
"""
put_body = json.dumps({'backup': kwargs})
resp, body = self.put('backups/%s' % backup_id, put_body)
@@ -55,7 +55,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#restore-a-backup
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#restore-a-backup
"""
post_body = json.dumps({'restore': kwargs})
resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
@@ -82,8 +82,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-backups-for-project
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-backups-with-detail
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-for-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-with-detail
"""
url = "backups"
if detail:
@@ -104,7 +104,12 @@
return rest_client.ResponseBody(resp, body)
def import_backup(self, **kwargs):
- """Import backup metadata record."""
+ """Import backup metadata record.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#import-a-backup
+ """
post_body = json.dumps({'backup-record': kwargs})
resp, body = self.post("backups/import_record", post_body)
body = json.loads(body)
diff --git a/tempest/lib/services/volume/v3/capabilities_client.py b/tempest/lib/services/volume/v3/capabilities_client.py
index 7ebcd69..dc850a8 100644
--- a/tempest/lib/services/volume/v3/capabilities_client.py
+++ b/tempest/lib/services/volume/v3/capabilities_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import capabilities as schema
from tempest.lib.common import rest_client
@@ -25,10 +26,10 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v2/index.html#show-back-end-capabilities
+ https://docs.openstack.org/api-ref/block-storage/v2/index.html#show-back-end-capabilities
"""
url = 'capabilities/%s' % host
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_backend_capabilities, resp, body)
return rest_client.ResponseBody(resp, body)
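This is the pattern repeated throughout the volume v3 clients below: expected_success() is replaced by validate_response(), which checks the status code and additionally validates the decoded JSON body against a schema module under tempest.lib.api_schema.response.volume. For orientation, a simplified, hypothetical example of what such a schema definition looks like (not the actual capabilities schema):

    # Simplified, hypothetical schema shape; not the real contents of
    # tempest.lib.api_schema.response.volume.capabilities.
    show_backend_capabilities = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {
                'volume_backend_name': {'type': 'string'},
                'pool_name': {'type': ['string', 'null']},
                'properties': {'type': 'object'},
            },
            # The real schema also pins 'required' keys and
            # 'additionalProperties' based on the API reference.
        }
    }

    # validate_response(show_backend_capabilities, resp, body) asserts that
    # resp.status is one of 'status_code' and runs JSON-schema validation
    # of 'body' against 'response_body'.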
diff --git a/tempest/lib/services/volume/v3/encryption_types_client.py b/tempest/lib/services/volume/v3/encryption_types_client.py
index 03de187..7cced57 100644
--- a/tempest/lib/services/volume/v3/encryption_types_client.py
+++ b/tempest/lib/services/volume/v3/encryption_types_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import encryption_types as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -43,7 +44,7 @@
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_encryption_type, resp, body)
return rest_client.ResponseBody(resp, body)
def show_encryption_specs_item(self, volume_type_id, key):
@@ -51,7 +52,7 @@
url = "/types/%s/encryption/%s" % (volume_type_id, key)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_encryption_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
def create_encryption_type(self, volume_type_id, **kwargs):
@@ -59,20 +60,20 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-an-encryption-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-an-encryption-type
"""
url = "/types/%s/encryption" % volume_type_id
post_body = json.dumps({'encryption': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.create_encryption_type, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_encryption_type(self, volume_type_id):
"""Delete the encryption type for the specified volume-type."""
resp, body = self.delete(
"/types/%s/encryption/provider" % volume_type_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_encryption_type, resp, body)
return rest_client.ResponseBody(resp, body)
def update_encryption_type(self, volume_type_id, **kwargs):
@@ -80,11 +81,11 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-an-encryption-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-an-encryption-type
"""
url = "/types/%s/encryption/provider" % volume_type_id
put_body = json.dumps({'encryption': kwargs})
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_encryption_type, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/extensions_client.py b/tempest/lib/services/volume/v3/extensions_client.py
index 45b7a56..f1fe5c9 100644
--- a/tempest/lib/services/volume/v3/extensions_client.py
+++ b/tempest/lib/services/volume/v3/extensions_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import extensions as schema
from tempest.lib.common import rest_client
@@ -25,5 +26,5 @@
url = 'extensions'
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_extensions, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/group_snapshots_client.py b/tempest/lib/services/volume/v3/group_snapshots_client.py
index 16412d3..e425a3f 100644
--- a/tempest/lib/services/volume/v3/group_snapshots_client.py
+++ b/tempest/lib/services/volume/v3/group_snapshots_client.py
@@ -29,7 +29,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#create-group-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/#create-group-snapshot
"""
post_body = json.dumps({'group_snapshot': kwargs})
resp, body = self.post('group_snapshots', post_body)
@@ -41,7 +41,7 @@
"""Deletes a group snapshot.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#delete-group-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/#delete-group-snapshot
"""
resp, body = self.delete('group_snapshots/%s' % group_snapshot_id)
self.expected_success(202, resp.status)
@@ -51,7 +51,7 @@
"""Returns the details of a single group snapshot.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#show-group-snapshot-details
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-group-snapshot-details
"""
url = "group_snapshots/%s" % str(group_snapshot_id)
resp, body = self.get(url)
@@ -63,8 +63,8 @@
"""Information for all the tenant's group snapshots.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#list-group-snapshots
- https://developer.openstack.org/api-ref/block-storage/v3/#list-group-snapshots-with-details
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-group-snapshots
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-group-snapshots-with-details
"""
url = "group_snapshots"
if detail:
@@ -80,7 +80,7 @@
"""Resets group snapshot status.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#reset-group-snapshot-status
+ https://docs.openstack.org/api-ref/block-storage/v3/#reset-group-snapshot-status
"""
post_body = json.dumps({'reset_status': {'status': status_to_set}})
resp, body = self.post('group_snapshots/%s/action' % group_snapshot_id,
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 1ccb9f8..e0bf5e2 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import group_types as schema
from tempest.lib.common import rest_client
from tempest.lib.services.volume import base_client
@@ -33,18 +34,18 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#create-group-type
+ https://docs.openstack.org/api-ref/block-storage/v3/#create-group-type
"""
post_body = json.dumps({'group_type': kwargs})
resp, body = self.post('group_types', post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.create_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_group_type(self, group_type_id):
"""Deletes the specified group_type."""
resp, body = self.delete("group_types/%s" % group_type_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def list_group_types(self, **params):
@@ -52,7 +53,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#list-group-types
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-group-types
"""
url = 'group_types'
if params:
@@ -60,6 +61,18 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_group_types, resp, body)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_default_group_type(self):
+ """Returns the details of default group_type.
+
+ For more information, please refer to the official API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-default-group-type-details
+ """
+ url = 'group_types/default'
+ resp, body = self.get(url)
+ body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
@@ -67,12 +80,12 @@
"""Returns the details of a single group_type.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#show-group-type-details
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-group-type-details
"""
url = "group_types/%s" % group_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def update_group_type(self, group_type_id, **kwargs):
@@ -80,12 +93,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-group-type
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-group-type
"""
post_body = json.dumps({'group_type': kwargs})
resp, body = self.put('group_types/%s' % group_type_id, post_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.update_group_type, resp, body)
return rest_client.ResponseBody(resp, body)
def create_or_update_group_type_specs(self, group_type_id, group_specs):
@@ -93,13 +106,14 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#create-or-update-group-specs-for-a-group-type
+ https://docs.openstack.org/api-ref/block-storage/v3/#create-or-update-group-specs-for-a-group-type
"""
url = "group_types/%s/group_specs" % group_type_id
post_body = json.dumps({'group_specs': group_specs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(
+ schema.create_or_update_group_type_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def list_group_type_specs(self, group_type_id):
@@ -107,7 +121,7 @@
url = 'group_types/%s/group_specs' % group_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_group_type_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def show_group_type_specs_item(self, group_type_id, spec_id):
@@ -115,7 +129,7 @@
url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
def update_group_type_specs_item(self, group_type_id, spec_id, spec):
@@ -123,18 +137,18 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-one-specific-group-spec-for-a-group-type
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-one-specific-group-spec-for-a-group-type
"""
url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
put_body = json.dumps(spec)
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_group_type_specs_item(self, group_type_id, spec_id):
"""Deletes specified item of group specs for a given group type."""
resp, body = self.delete("group_types/%s/group_specs/%s" % (
group_type_id, spec_id))
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_group_type_specs_item, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/groups_client.py b/tempest/lib/services/volume/v3/groups_client.py
index 3cf1e6a..ffae232 100644
--- a/tempest/lib/services/volume/v3/groups_client.py
+++ b/tempest/lib/services/volume/v3/groups_client.py
@@ -30,7 +30,7 @@
group_type and volume_types are required parameters in kwargs.
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#create-group
+ https://docs.openstack.org/api-ref/block-storage/v3/#create-group
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.post('groups', post_body)
@@ -43,7 +43,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#delete-group
+ https://docs.openstack.org/api-ref/block-storage/v3/#delete-group
"""
post_body = {'delete-volumes': delete_volumes}
post_body = json.dumps({'delete': post_body})
@@ -57,7 +57,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#show-group-details
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-group-details
"""
url = "groups/%s" % str(group_id)
resp, body = self.get(url)
@@ -70,8 +70,8 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#list-groups
- https://developer.openstack.org/api-ref/block-storage/v3/#list-groups-with-details
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-groups
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-groups-with-details
"""
url = "groups"
if detail:
@@ -88,7 +88,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#create-group-from-source
+ https://docs.openstack.org/api-ref/block-storage/v3/#create-group-from-source
"""
post_body = json.dumps({'create-from-src': kwargs})
resp, body = self.post('groups/action', post_body)
@@ -101,7 +101,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-group
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-group
"""
put_body = json.dumps({'group': kwargs})
resp, body = self.put('groups/%s' % group_id, put_body)
@@ -112,7 +112,7 @@
"""Resets group status.
For more information, please refer to the official API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#reset-group-status
+ https://docs.openstack.org/api-ref/block-storage/v3/#reset-group-status
"""
post_body = json.dumps({'reset_status': {'status': status_to_set}})
resp, body = self.post('groups/%s/action' % group_id, post_body)
diff --git a/tempest/lib/services/volume/v3/hosts_client.py b/tempest/lib/services/volume/v3/hosts_client.py
index 8b65805..019a852 100644
--- a/tempest/lib/services/volume/v3/hosts_client.py
+++ b/tempest/lib/services/volume/v3/hosts_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import hosts as schema
from tempest.lib.common import rest_client
@@ -27,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-all-hosts-for-a-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-all-hosts-for-a-project
"""
url = 'os-hosts'
if params:
@@ -35,13 +36,13 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_hosts, resp, body)
return rest_client.ResponseBody(resp, body)
def show_host(self, host_name):
"""Show host details."""
url = 'os-hosts/%s' % host_name
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_host, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/limits_client.py b/tempest/lib/services/volume/v3/limits_client.py
index 9500254..a8d1377 100644
--- a/tempest/lib/services/volume/v3/limits_client.py
+++ b/tempest/lib/services/volume/v3/limits_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import limits as schema
from tempest.lib.common import rest_client
@@ -26,5 +27,5 @@
url = "limits"
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_limits, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/messages_client.py b/tempest/lib/services/volume/v3/messages_client.py
index 47538cd..b770fac 100644
--- a/tempest/lib/services/volume/v3/messages_client.py
+++ b/tempest/lib/services/volume/v3/messages_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import messages as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
@@ -28,7 +29,7 @@
url = 'messages/%s' % str(message_id)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_message, resp, body)
return rest_client.ResponseBody(resp, body)
def list_messages(self):
@@ -36,14 +37,14 @@
url = 'messages'
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_messages, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_message(self, message_id):
"""Delete a single message."""
url = 'messages/%s' % str(message_id)
resp, body = self.delete(url)
- self.expected_success(204, resp.status)
+ self.validate_response(schema.delete_message, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
diff --git a/tempest/lib/services/volume/v3/qos_client.py b/tempest/lib/services/volume/v3/qos_client.py
index 8f4d37f..752e381 100644
--- a/tempest/lib/services/volume/v3/qos_client.py
+++ b/tempest/lib/services/volume/v3/qos_client.py
@@ -14,6 +14,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import qos as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -41,19 +42,19 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-qos-specification
"""
post_body = json.dumps({'qos_specs': kwargs})
resp, body = self.post('qos-specs', post_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_qos(self, qos_id, force=False):
"""Delete the specified QoS specification."""
resp, body = self.delete(
"qos-specs/%s?force=%s" % (qos_id, force))
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def list_qos(self):
@@ -61,7 +62,7 @@
url = 'qos-specs'
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def show_qos(self, qos_id):
@@ -69,7 +70,7 @@
url = "qos-specs/%s" % qos_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def set_qos_key(self, qos_id, **kwargs):
@@ -77,12 +78,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#set-keys-in-a-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#set-keys-in-a-qos-specification
"""
put_body = json.dumps({"qos_specs": kwargs})
resp, body = self.put('qos-specs/%s' % qos_id, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.set_qos_key, resp, body)
return rest_client.ResponseBody(resp, body)
def unset_qos_key(self, qos_id, keys):
@@ -92,11 +93,11 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#unset-keys-in-a-qos-specification
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#unset-keys-in-a-qos-specification
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.unset_qos_key, resp, body)
return rest_client.ResponseBody(resp, body)
def associate_qos(self, qos_id, vol_type_id):
@@ -104,7 +105,7 @@
url = "qos-specs/%s/associate" % qos_id
url += "?vol_type_id=%s" % vol_type_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.associate_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def show_association_qos(self, qos_id):
@@ -112,7 +113,7 @@
url = "qos-specs/%s/associations" % qos_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_association_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def disassociate_qos(self, qos_id, vol_type_id):
@@ -120,12 +121,12 @@
url = "qos-specs/%s/disassociate" % qos_id
url += "?vol_type_id=%s" % vol_type_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.disassociate_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def disassociate_all_qos(self, qos_id):
"""Disassociate the specified QoS with all associations."""
url = "qos-specs/%s/disassociate_all" % qos_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.disassociate_all_qos, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/quota_classes_client.py b/tempest/lib/services/volume/v3/quota_classes_client.py
index a8eb536..ff62f0c 100644
--- a/tempest/lib/services/volume/v3/quota_classes_client.py
+++ b/tempest/lib/services/volume/v3/quota_classes_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import quota_classes as schema
from tempest.lib.common import rest_client
@@ -26,12 +27,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#show-quota-classes-for-a-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-quota-classes-for-a-project
"""
url = 'os-quota-class-sets/%s' % quota_class_id
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_quota_classes, resp, body)
return rest_client.ResponseBody(resp, body)
def update_quota_class_set(self, quota_class_id, **kwargs):
@@ -39,11 +40,11 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-quota-classes-for-a-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-quota-classes-for-a-project
"""
url = 'os-quota-class-sets/%s' % quota_class_id
put_body = json.dumps({'quota_class_set': kwargs})
resp, body = self.put(url, put_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.update_quota_classes, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/quotas_client.py b/tempest/lib/services/volume/v3/quotas_client.py
index 538a915..5b1a52c 100644
--- a/tempest/lib/services/volume/v3/quotas_client.py
+++ b/tempest/lib/services/volume/v3/quotas_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import quotas as schema
from tempest.lib.common import rest_client
@@ -27,8 +28,8 @@
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ self.validate_response(schema.show_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def show_quota_set(self, tenant_id, params=None):
@@ -39,8 +40,11 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ if params and params.get('usage', False):
+ self.validate_response(schema.show_quota_set_usage, resp, body)
+ else:
+ self.validate_response(schema.show_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def update_quota_set(self, tenant_id, **kwargs):
@@ -48,16 +52,16 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-quotas-for-a-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-quotas-for-a-project
"""
put_body = jsonutils.dumps({'quota_set': kwargs})
resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ self.validate_response(schema.update_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.delete_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
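
A short sketch of how the usage flag now selects the schema in show_quota_set; quotas_client and project_id are assumed to come from an authenticated test setup:

    # Plain quota set: validated against schema.show_quota_set.
    quotas = quotas_client.show_quota_set(project_id)['quota_set']
    # With usage details the same call is validated against
    # schema.show_quota_set_usage, whose per-resource entries carry
    # limit / in_use / reserved values.
    usage = quotas_client.show_quota_set(
        project_id, params={'usage': True})['quota_set']
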
diff --git a/tempest/lib/services/volume/v3/scheduler_stats_client.py b/tempest/lib/services/volume/v3/scheduler_stats_client.py
index b1d3e37..e18980d 100644
--- a/tempest/lib/services/volume/v3/scheduler_stats_client.py
+++ b/tempest/lib/services/volume/v3/scheduler_stats_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-all-back-end-storage-pools
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-all-back-end-storage-pools
"""
url = 'scheduler-stats/get_pools'
schema_get_pools = schema.get_pools_no_detail
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 22155a9..8bc82c9 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import services as schema
from tempest.lib.common import rest_client
@@ -27,7 +28,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#list-all-cinder-services
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-all-cinder-services
"""
url = 'os-services'
if params:
@@ -35,7 +36,7 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_services, resp, body)
return rest_client.ResponseBody(resp, body)
def enable_service(self, **kwargs):
@@ -43,12 +44,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#enable-a-cinder-service
+ https://docs.openstack.org/api-ref/block-storage/v3/#enable-a-cinder-service
"""
put_body = json.dumps(kwargs)
resp, body = self.put('os-services/enable', put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.enable_service, resp, body)
return rest_client.ResponseBody(resp, body)
def disable_service(self, **kwargs):
@@ -56,12 +57,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#disable-a-cinder-service
+ https://docs.openstack.org/api-ref/block-storage/v3/#disable-a-cinder-service
"""
put_body = json.dumps(kwargs)
resp, body = self.put('os-services/disable', put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.disable_service, resp, body)
return rest_client.ResponseBody(resp, body)
def disable_log_reason(self, **kwargs):
@@ -69,12 +70,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#log-disabled-cinder-service-information
+ https://docs.openstack.org/api-ref/block-storage/v3/#log-disabled-cinder-service-information
"""
put_body = json.dumps(kwargs)
resp, body = self.put('os-services/disable-log-reason', put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.disable_log_reason, resp, body)
return rest_client.ResponseBody(resp, body)
def freeze_host(self, **kwargs):
@@ -82,11 +83,11 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#freeze-a-cinder-backend-host
+ https://docs.openstack.org/api-ref/block-storage/v3/#freeze-a-cinder-backend-host
"""
put_body = json.dumps(kwargs)
- resp, _ = self.put('os-services/freeze', put_body)
- self.expected_success(200, resp.status)
+ resp, body = self.put('os-services/freeze', put_body)
+ self.validate_response(schema.freeze_host, resp, body)
return rest_client.ResponseBody(resp)
def thaw_host(self, **kwargs):
@@ -94,9 +95,9 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#thaw-a-cinder-backend-host
+ https://docs.openstack.org/api-ref/block-storage/v3/#thaw-a-cinder-backend-host
"""
put_body = json.dumps(kwargs)
- resp, _ = self.put('os-services/thaw', put_body)
- self.expected_success(200, resp.status)
+ resp, body = self.put('os-services/thaw', put_body)
+ self.validate_response(schema.thaw_host, resp, body)
return rest_client.ResponseBody(resp)
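
A hedged sketch of the services client calls now covered by schemas, assuming a pre-authenticated admin ServicesClient instance; the binary and reason strings are illustrative:

    services = services_client.list_services(
        binary='cinder-volume')['services']
    host = services[0]['host']
    # Disable with a reason; the response is checked against
    # schema.disable_log_reason.
    services_client.disable_log_reason(
        host=host, binary='cinder-volume', disabled_reason='maintenance')
    services_client.enable_service(host=host, binary='cinder-volume')
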
diff --git a/tempest/lib/services/volume/v3/snapshot_manage_client.py b/tempest/lib/services/volume/v3/snapshot_manage_client.py
index 43fd328..77920e4 100644
--- a/tempest/lib/services/volume/v3/snapshot_manage_client.py
+++ b/tempest/lib/services/volume/v3/snapshot_manage_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import manage_snapshot as schema
from tempest.lib.common import rest_client
@@ -22,10 +23,15 @@
"""Snapshot manage client."""
def manage_snapshot(self, **kwargs):
- """Manage a snapshot."""
+ """Manage a snapshot.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#manage-an-existing-snapshot
+ """
post_body = json.dumps({'snapshot': kwargs})
url = 'os-snapshot-manage'
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
body = json.loads(body)
+ self.validate_response(schema.manage_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
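
A sketch of managing an existing backend snapshot with the client above; snapshot_manage_client and volume are assumed fixtures, and the ref key/value pair is backend specific and only illustrative:

    # 'ref' identifies the snapshot already present on the backend; the
    # expected key ('source-name' here) depends on the storage driver.
    snapshot = snapshot_manage_client.manage_snapshot(
        volume_id=volume['id'],
        ref={'source-name': 'existing-snap-name'})['snapshot']
    # The 202 response body is now validated against schema.manage_snapshot.
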
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index cae65b2..8ca2044 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import snapshots as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -28,18 +29,20 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-snapshots
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-snapshots-and-details
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-snapshots
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-snapshots-and-details
"""
url = 'snapshots'
+ list_schema = schema.list_snapshots_no_detail
if detail:
url += '/detail'
+ list_schema = schema.list_snapshots_with_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(list_schema, resp, body)
return rest_client.ResponseBody(resp, body)
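
A brief sketch of how the detail flag picks the list schema, assuming a pre-authenticated SnapshotsClient instance:

    # Summary listing: validated against schema.list_snapshots_no_detail.
    snapshots_client.list_snapshots()
    # Detailed listing: validated against schema.list_snapshots_with_detail,
    # which requires the extra per-snapshot fields in each entry.
    snapshots_client.list_snapshots(detail=True)
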
def show_snapshot(self, snapshot_id):
@@ -47,12 +50,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#show-a-snapshot-s-details
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-a-snapshot-s-details
"""
url = "snapshots/%s" % snapshot_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
def create_snapshot(self, **kwargs):
@@ -60,12 +63,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-snapshot
"""
post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.create_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
def update_snapshot(self, snapshot_id, **kwargs):
@@ -73,12 +76,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-snapshot
"""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_snapshot(self, snapshot_id):
@@ -86,10 +89,10 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#delete-a-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#delete-a-snapshot
"""
resp, body = self.delete("snapshots/%s" % snapshot_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
@@ -108,7 +111,7 @@
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.reset_snapshot_status, resp, body)
return rest_client.ResponseBody(resp, body)
def update_snapshot_status(self, snapshot_id, **kwargs):
@@ -116,12 +119,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-status-of-a-snapshot
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-status-of-a-snapshot
"""
post_body = json.dumps({'os-update_snapshot_status': kwargs})
url = 'snapshots/%s/action' % snapshot_id
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.update_snapshot_status, resp, body)
return rest_client.ResponseBody(resp, body)
def create_snapshot_metadata(self, snapshot_id, metadata):
@@ -129,13 +132,13 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-snapshot-s-metadata
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-snapshot-s-metadata
"""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.post(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.create_snapshot_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def show_snapshot_metadata(self, snapshot_id):
@@ -143,12 +146,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#show-a-snapshot-s-metadata
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-a-snapshot-s-metadata
"""
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_snapshot_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata(self, snapshot_id, **kwargs):
@@ -156,13 +159,13 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-snapshot-s-metadata
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-snapshot-s-metadata
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_snapshot_metadata, resp, body)
return rest_client.ResponseBody(resp, body)
def show_snapshot_metadata_item(self, snapshot_id, id):
@@ -170,7 +173,7 @@
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_snapshot_metadata_item, resp, body)
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
@@ -178,27 +181,29 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#update-a-snapshot-s-metadata-for-a-specific-key
+ https://docs.openstack.org/api-ref/block-storage/v3/#update-a-snapshot-s-metadata-for-a-specific-key
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.update_snapshot_metadata_item, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.delete(url)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.delete_snapshot_metadata_item, resp, body)
return rest_client.ResponseBody(resp, body)
def force_delete_snapshot(self, snapshot_id):
"""Force Delete Snapshot."""
post_body = json.dumps({'os-force_delete': {}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.force_delete_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
def unmanage_snapshot(self, snapshot_id):
@@ -206,5 +211,5 @@
post_body = json.dumps({'os-unmanage': {}})
url = 'snapshots/%s/action' % (snapshot_id)
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.unmanage_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/transfers_client.py b/tempest/lib/services/volume/v3/transfers_client.py
index 97c5597..f572f95 100644
--- a/tempest/lib/services/volume/v3/transfers_client.py
+++ b/tempest/lib/services/volume/v3/transfers_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import transfers as schema
from tempest.lib.common import rest_client
@@ -27,12 +28,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-transfer
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-transfer
"""
post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.create_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_transfer(self, transfer_id):
@@ -40,7 +41,7 @@
url = "os-volume-transfer/%s" % transfer_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_transfers(self, detail=False, **params):
@@ -48,23 +49,25 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-for-a-project
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-for-a-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
"""
url = 'os-volume-transfer'
+ schema_list_transfers = schema.list_volume_transfers_no_detail
if detail:
url += '/detail'
+ schema_list_transfers = schema.list_volume_transfers_with_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema_list_transfers, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_transfer(self, transfer_id):
"""Delete a volume transfer."""
resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def accept_volume_transfer(self, transfer_id, **kwargs):
@@ -72,11 +75,11 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#accept-a-volume-transfer
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#accept-a-volume-transfer
"""
url = 'os-volume-transfer/%s/accept' % transfer_id
post_body = json.dumps({'accept': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.accept_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
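
A hedged sketch of the transfer create/accept flow these schemas now cover; the two client instances stand for the transferring and receiving projects and are assumptions of the sketch:

    # 'owner' creates the transfer, 'recipient' accepts it.
    transfer = owner_transfers_client.create_volume_transfer(
        volume_id=volume['id'])['transfer']
    # auth_key is only returned by the create call, so it has to be handed
    # over out of band to the receiving project.
    recipient_transfers_client.accept_volume_transfer(
        transfer['id'], auth_key=transfer['auth_key'])
    owner_transfers_client.list_volume_transfers(detail=True)
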
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 13ecd15..7fa24a4 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import volume_types as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -40,7 +41,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-all-volume-types
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-all-volume-types
"""
url = 'types'
if params:
@@ -48,7 +49,7 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_volume_types, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_type(self, volume_type_id):
@@ -56,12 +57,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#show-volume-type-detail
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-volume-type-detail
"""
url = "types/%s" % volume_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def create_volume_type(self, **kwargs):
@@ -69,12 +70,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-type
"""
post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.create_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_type(self, volume_type_id):
@@ -82,10 +83,10 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#delete-a-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#delete-a-volume-type
"""
resp, body = self.delete("types/%s" % volume_type_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_types_extra_specs(self, volume_type_id, **params):
@@ -93,7 +94,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#show-all-extra-specifications-for-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-all-extra-specifications-for-volume-type
"""
url = 'types/%s/extra_specs' % volume_type_id
if params:
@@ -101,7 +102,8 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.list_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
@@ -109,7 +111,8 @@
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.show_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
@@ -122,14 +125,16 @@
post_body = json.dumps({'extra_specs': extra_specs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.create_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
"""Deletes the specified volume type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
volume_type_id, extra_spec_name))
- self.expected_success(202, resp.status)
+ self.validate_response(
+ schema.delete_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def update_volume_type(self, volume_type_id, **kwargs):
@@ -137,12 +142,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-type
"""
put_body = json.dumps({'volume_type': kwargs})
resp, body = self.put('types/%s' % volume_type_id, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_volume_types, resp, body)
return rest_client.ResponseBody(resp, body)
def update_volume_type_extra_specs(self, volume_type_id, extra_spec_name,
@@ -153,15 +158,17 @@
:param extra_spec_name: Name of the extra spec to be updated.
:param extra_specs: A dictionary with key as extra_spec_name and the
updated value.
+
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
"""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
put_body = json.dumps(extra_specs)
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.update_volume_type_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def add_type_access(self, volume_type_id, **kwargs):
@@ -169,12 +176,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#add-private-volume-type-access-to-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#add-private-volume-type-access-to-project
"""
post_body = json.dumps({'addProjectAccess': kwargs})
url = 'types/%s/action' % volume_type_id
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.add_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
def remove_type_access(self, volume_type_id, **kwargs):
@@ -182,12 +189,12 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#remove-private-volume-type-access-from-project
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#remove-private-volume-type-access-from-project
"""
post_body = json.dumps({'removeProjectAccess': kwargs})
url = 'types/%s/action' % volume_type_id
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.remove_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
def list_type_access(self, volume_type_id):
@@ -195,10 +202,10 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-private-volume-type-access-detail
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-private-volume-type-access-detail
"""
url = 'types/%s/os-volume-type-access' % volume_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
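
A sketch tying together type creation, extra specs and project access as exercised by the client above; types_client is assumed to be an admin client, and the names, backend and project_id are illustrative:

    vol_type = types_client.create_volume_type(
        name='fast', extra_specs={'volume_backend_name': 'lvm-ssd'},
        **{'os-volume-type-access:is_public': False})['volume_type']
    types_client.create_volume_type_extra_specs(
        vol_type['id'], {'multiattach': '<is> True'})
    # Grant a single project access to the private type, then list it.
    types_client.add_type_access(vol_type['id'], project=project_id)
    types_client.list_type_access(vol_type['id'])
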
diff --git a/tempest/lib/services/volume/v3/versions_client.py b/tempest/lib/services/volume/v3/versions_client.py
index 57629bd..aa6c867 100644
--- a/tempest/lib/services/volume/v3/versions_client.py
+++ b/tempest/lib/services/volume/v3/versions_client.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
+from six.moves.urllib.parse import urljoin
from oslo_serialization import jsonutils as json
@@ -28,20 +28,35 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#list-all-api-versions
+ https://docs.openstack.org/api-ref/block-storage/v3/#list-all-api-versions
"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
# NOTE: We need a raw_request() here instead of request() call because
# "list API versions" API doesn't require an authentication and we can
# skip it with raw_request() call.
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
body = json.loads(body)
self.validate_response(schema.list_versions, resp, body)
return rest_client.ResponseBody(resp, body)
+
+ def show_version(self, version):
+ """Show API version details
+
+ Use raw_request in order to have access to the endpoint minus
+ version and project, so that only the requested version is added back.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/#show-api-v3-details
+ """
+
+ version_url = urljoin(self._get_base_version_url(), version + '/')
+ resp, body = self.raw_request(version_url, 'GET',
+ {'X-Auth-Token': self.token})
+ self._error_checker(resp, body)
+ body = json.loads(body)
+ self.validate_response(schema.volume_api_version_details, resp, body)
+ return rest_client.ResponseBody(resp, body)
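
The reason show_version appends a trailing '/' before joining is that urljoin only appends the new segment when the base path also ends with '/'; otherwise it would replace the last path element. A small standalone sketch (the endpoint is illustrative):

    from six.moves.urllib.parse import urljoin

    # _get_base_version_url() returns the endpoint without version/project,
    # e.g. 'https://cloud.example.com/volume/' (illustrative).
    base = 'https://cloud.example.com/volume/'
    print(urljoin(base, 'v3' + '/'))  # https://cloud.example.com/volume/v3/
    print(urljoin(base, 'v3'))        # same result here, but without the
                                      # trailing '/' a longer base path would
                                      # have its last segment replaced
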
diff --git a/tempest/lib/services/volume/v3/volume_manage_client.py b/tempest/lib/services/volume/v3/volume_manage_client.py
index 349e11d..85b1b82 100644
--- a/tempest/lib/services/volume/v3/volume_manage_client.py
+++ b/tempest/lib/services/volume/v3/volume_manage_client.py
@@ -26,7 +26,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#manage-an-existing-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#manage-an-existing-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('os-volume-manage', post_body)
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 11c5767..4fb6d2e 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -35,14 +35,24 @@
return params
return urllib.urlencode(params)
+ def list_hosts(self):
+ """Lists all hosts summary info that is not disabled.
+
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-all-hosts-for-a-project
+ """
+ resp, body = self.get('os-hosts')
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def list_volumes(self, detail=False, params=None):
"""List all the volumes created.
Params can be a string (must be urlencoded) or a dictionary.
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes-with-details
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes-with-details
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes
"""
url = 'volumes'
if detail:
@@ -55,6 +65,19 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def migrate_volume(self, volume_id, **kwargs):
+ """Migrate a volume to a new backend
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
+ """
+ post_body = json.dumps({'os-migrate_volume': kwargs})
+ resp, body = self.post('volumes/%s/action' % volume_id, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
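
A hedged usage sketch of the new migrate_volume helper; the kwargs mirror the os-migrate_volume action body, and the host string and client instance are assumptions:

    # Hypothetical, pre-authenticated admin VolumesClient instance.
    volumes_client.migrate_volume(volume['id'],
                                  host='cinder-vol-2@lvm#lvm',
                                  force_host_copy=False,
                                  lock_volume=False)
    # Only a 202 comes back; callers track completion by polling
    # show_volume() until the volume's migration status reports success.
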
def show_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % volume_id
@@ -68,7 +91,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
@@ -81,7 +104,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume
"""
put_body = json.dumps({'volume': kwargs})
resp, body = self.put('volumes/%s' % volume_id, put_body)
@@ -94,7 +117,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#delete-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#delete-a-volume
"""
url = 'volumes/%s' % volume_id
if params:
@@ -108,7 +131,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/#get-volumes-summary
+ https://docs.openstack.org/api-ref/block-storage/v3/#get-volumes-summary
"""
url = 'volumes/summary'
if params:
@@ -119,7 +142,12 @@
return rest_client.ResponseBody(resp, body)
def upload_volume(self, volume_id, **kwargs):
- """Uploads a volume in Glance."""
+ """Uploads a volume in Glance.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#upload-volume-to-image
+ """
post_body = json.dumps({'os-volume_upload_image': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
@@ -132,7 +160,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#attach-volume-to-a-server
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#attach-volume-to-a-server
"""
post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -145,7 +173,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-s-bootable-status
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-s-bootable-status
"""
post_body = json.dumps({'os-set_bootable': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -182,14 +210,16 @@
:param id: A checked resource id
:raises lib_exc.DeleteErrorException: If the specified resource is in
- a status where the delete failed.
+ a status where the delete failed.
"""
try:
volume = self.show_volume(id)
except lib_exc.NotFound:
return True
if volume["volume"]["status"] == "error_deleting":
- raise lib_exc.DeleteErrorException(resource_id=id)
+ raise lib_exc.DeleteErrorException(
+ "Volume %s failed to delete and is in error_deleting status" %
+ volume['volume']['id'])
return False
@property
@@ -202,7 +232,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#extend-a-volume-size
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#extend-a-volume-size
"""
post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -215,7 +245,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#reset-a-volume-s-statuses
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#reset-a-volume-s-statuses
"""
post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
@@ -223,7 +253,12 @@
return rest_client.ResponseBody(resp, body)
def update_volume_readonly(self, volume_id, **kwargs):
- """Update the Specified Volume readonly."""
+ """Update the Specified Volume readonly.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#updates-volume-read-only-access-mode-flag
+ """
post_body = json.dumps({'os-update_readonly_flag': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
@@ -242,7 +277,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#create-metadata-for-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-metadata-for-volume
"""
put_body = json.dumps({'metadata': metadata})
url = "volumes/%s/metadata" % volume_id
@@ -264,7 +299,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-s-metadata
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#update-a-volume-s-metadata
"""
put_body = json.dumps({'metadata': metadata})
url = "volumes/%s/metadata" % volume_id
@@ -302,7 +337,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#retype-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#retype-a-volume
"""
post_body = json.dumps({'os-retype': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
@@ -314,7 +349,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#force-delete-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#force-detach-a-volume
"""
post_body = json.dumps({'os-force_detach': kwargs})
url = 'volumes/%s/action' % volume_id
@@ -327,7 +362,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#set-image-metadata-for-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#set-image-metadata-for-a-volume
"""
post_body = json.dumps({'os-set_image_metadata': {'metadata': kwargs}})
url = "volumes/%s/action" % (volume_id)
@@ -358,7 +393,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/block-storage/v3/index.html#unmanage-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#unmanage-a-volume
"""
post_body = json.dumps({'os-unmanage': {}})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 00d45cd..efdfe8e 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -28,6 +28,8 @@
from tempest.common import waiters
from tempest import config
from tempest import exceptions
+from tempest.lib.common import api_microversion_fixture
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
@@ -37,12 +39,57 @@
LOG = log.getLogger(__name__)
+LATEST_MICROVERSION = 'latest'
+
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
+ compute_min_microversion = None
+ compute_max_microversion = LATEST_MICROVERSION
+ volume_min_microversion = None
+ volume_max_microversion = LATEST_MICROVERSION
+ placement_min_microversion = None
+ placement_max_microversion = LATEST_MICROVERSION
+
+ @classmethod
+ def skip_checks(cls):
+ super(ScenarioTest, cls).skip_checks()
+ api_version_utils.check_skip_with_microversion(
+ cls.compute_min_microversion, cls.compute_max_microversion,
+ CONF.compute.min_microversion, CONF.compute.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.volume_min_microversion, cls.volume_max_microversion,
+ CONF.volume.min_microversion, CONF.volume.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.placement_min_microversion, cls.placement_max_microversion,
+ CONF.placement.min_microversion, CONF.placement.max_microversion)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ScenarioTest, cls).resource_setup()
+ cls.compute_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.compute_min_microversion,
+ CONF.compute.min_microversion))
+ cls.volume_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.volume_min_microversion,
+ CONF.volume.min_microversion))
+ cls.placement_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.placement_min_microversion,
+ CONF.placement.min_microversion))
+
+ def setUp(self):
+ super(ScenarioTest, self).setUp()
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion=self.compute_request_microversion,
+ volume_microversion=self.volume_request_microversion,
+ placement_microversion=self.placement_request_microversion))
+
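
With the skip check, request-version selection and fixture above in place, a scenario test opts into a minimum microversion just by setting the class attributes. A hypothetical subclass as a sketch (the chosen versions are illustrative):

    class TestVolumeMultiattachScenario(ScenarioTest):
        # Skipped automatically unless the configured compute/volume
        # microversion ranges overlap these minimums.
        compute_min_microversion = '2.60'
        volume_min_microversion = '3.27'

        def test_something(self):
            # Requests issued here carry the selected microversion headers,
            # set by APIMicroversionFixture in setUp().
            pass
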
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
@@ -125,6 +172,27 @@
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
+
+ :param **kwargs:
+ See extra parameters below
+
+ :Keyword Arguments:
+ * *vnic_type* (``string``) --
+ used when launching instances with pre-configured ports.
+ Examples:
+ normal: a traditional virtual port that is either attached
+ to a linux bridge or an openvswitch bridge on a
+ compute node.
+ direct: an SR-IOV port that is directly attached to a VM
+ macvtap: an SR-IOV port that is attached to a VM via a macvtap
+ device.
+ Defaults to ``CONF.network.port_vnic_type``.
+ * *port_profile* (``dict``) --
+ This attribute is a dictionary that can be used (with admin
+ credentials) to supply information influencing the binding of
+ the port.
+ example: port_profile = "capabilities:[switchdev]"
+ Defaults to ``CONF.network.port_profile``.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
@@ -143,8 +211,8 @@
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-server")
- vnic_type = CONF.network.port_vnic_type
- profile = CONF.network.port_profile
+ vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
+ profile = kwargs.pop('port_profile', CONF.network.port_profile)
# If vnic_type or profile are configured create port for
# every network
@@ -166,7 +234,7 @@
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
+ for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
@@ -206,6 +274,10 @@
tenant_network = self.get_tenant_network()
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
body, _ = compute.create_test_server(
clients,
tenant_network=tenant_network,
@@ -239,6 +311,11 @@
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
+
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
@@ -303,11 +380,32 @@
snapshot['id'])['snapshot']
return snapshot
+ def _cleanup_volume_type(self, volume_type):
+ """Clean up a given volume type.
+
+ Ensuring all volumes associated to a type are first removed before
+ attempting to remove the type itself. This includes any image volume
+ cache volumes stored in a separate tenant to the original volumes
+ created from the type.
+ """
+ admin_volume_type_client = self.os_admin.volume_types_client_latest
+ admin_volumes_client = self.os_admin.volumes_client_latest
+ volumes = admin_volumes_client.list_volumes(
+ detail=True, params={'all_tenants': 1})['volumes']
+ type_name = volume_type['name']
+ for volume in [v for v in volumes if v['volume_type'] == type_name]:
+ test_utils.call_and_ignore_notfound_exc(
+ admin_volumes_client.delete_volume, volume['id'])
+ admin_volumes_client.wait_for_resource_deletion(volume['id'])
+ admin_volume_type_client.delete_volume_type(volume_type['id'])
+
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
client = self.os_admin.volume_types_client_latest
- randomized_name = name or data_utils.rand_name(
- 'volume-type-' + self.__class__.__name__)
+ if not name:
+ class_name = self.__class__.__name__
+ name = data_utils.rand_name(class_name + '-volume-type')
+ randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
@@ -317,7 +415,7 @@
volume_type = client.create_volume_type(
name=randomized_name, extra_specs=extra_specs)['volume_type']
- self.addCleanup(client.delete_volume_type, volume_type['id'])
+ self.addCleanup(self._cleanup_volume_type, volume_type)
return volume_type
def _create_loginable_secgroup_rule(self, secgroup_id=None):
@@ -378,12 +476,12 @@
server=None):
"""Get a SSH client to a remote server
- @param ip_address the server floating or fixed IP address to use
- for ssh validation
- @param username name of the Linux account on the remote server
- @param private_key the SSH private key to use
- @param server: server dict, used for debugging purposes
- @return a RemoteClient object
+ :param ip_address: the server floating or fixed IP address to use
+ for ssh validation
+ :param username: name of the Linux account on the remote server
+ :param private_key: the SSH private key to use
+ :param server: server dict, used for debugging purposes
+ :return: a RemoteClient object
"""
if username is None:
@@ -536,8 +634,7 @@
def nova_volume_attach(self, server, volume_to_attach):
volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
- % CONF.compute.volume_device_name)['volumeAttachment']
+ server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
@@ -713,6 +810,42 @@
server_details = cls.os_admin.servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
+ def _get_bdm(self, source_id, source_type, delete_on_termination=False):
+ bd_map_v2 = [{
+ 'uuid': source_id,
+ 'source_type': source_type,
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': delete_on_termination}]
+ return {'block_device_mapping_v2': bd_map_v2}
+
+ def boot_instance_from_resource(self, source_id,
+ source_type,
+ keypair=None,
+ security_group=None,
+ delete_on_termination=False,
+ name=None):
+ create_kwargs = dict()
+ if keypair:
+ create_kwargs['key_name'] = keypair['name']
+ if security_group:
+ create_kwargs['security_groups'] = [
+ {'name': security_group['name']}]
+ create_kwargs.update(self._get_bdm(
+ source_id,
+ source_type,
+ delete_on_termination=delete_on_termination))
+ if name:
+ create_kwargs['name'] = name
+
+ return self.create_server(image_id='', **create_kwargs)
+
+ def create_volume_from_image(self):
+ img_uuid = CONF.compute.image_ref
+ vol_name = data_utils.rand_name(
+ self.__class__.__name__ + '-volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
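
A sketch of how the new helpers combine to boot a server from a volume inside a hypothetical ScenarioTest method; create_keypair and _create_security_group are existing helpers of this class:

    volume_origin = self.create_volume_from_image()
    keypair = self.create_keypair()
    security_group = self._create_security_group()
    # block_device_mapping_v2 is built by _get_bdm(); image_id='' makes the
    # server boot from the mapped volume instead of an image.
    server = self.boot_instance_from_resource(
        source_id=volume_origin['id'],
        source_type='volume',
        keypair=keypair,
        security_group=security_group,
        delete_on_termination=True)
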
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
@@ -737,13 +870,15 @@
def _create_network(self, networks_client=None,
tenant_id=None,
namestart='network-smoke-',
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
if not networks_client:
networks_client = self.networks_client
if not tenant_id:
tenant_id = networks_client.tenant_id
name = data_utils.rand_name(namestart)
network_kwargs = dict(name=name, tenant_id=tenant_id)
+ if net_dict:
+ network_kwargs.update(net_dict)
# Neutron disables port security by default so we have to check the
# config before trying to create the network with port_security_enabled
if CONF.network_feature_enabled.port_security:
@@ -831,18 +966,21 @@
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
+
+ def _is_active(port):
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state. This has been fixed
+ # with the introduction of the networking-baremetal plugin but
+ # it's not mandatory (and is not used on all stable branches).
+ return (port['status'] == 'ACTIVE' or
+ port.get('binding:vnic_type') == 'baremetal')
+
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
if (netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status)]
+ _is_active(p))]
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
@@ -873,13 +1011,18 @@
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
- result = client.create_floatingip(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing['tenant_id'],
- fixed_ip_address=ip4
- )
+
+ kwargs = {
+ 'floating_network_id': external_network_id,
+ 'port_id': port_id,
+ 'tenant_id': thing['tenant_id'],
+ 'fixed_ip_address': ip4,
+ }
+ if CONF.network.subnet_id:
+ kwargs['subnet_id'] = CONF.network.subnet_id
+ result = client.create_floatingip(**kwargs)
floating_ip = result['floatingip']
+
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_floatingip,
floating_ip['id'])
@@ -937,24 +1080,33 @@
raise
def check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """assert ping server via source ssh connection
+ nic=None, protocol='icmp'):
+ """check server connectivity via source ssh connection
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: an IP to ping against
- :param should_succeed: boolean: should ping succeed or not
- :param nic: specific network interface to ping from
+ :param source: RemoteClient: an ssh connection from which to execute
+ the check
+ :param dest: an IP to check connectivity against
+ :param should_succeed: boolean: should the connection succeed or not
+ :param nic: specific network interface to test connectivity from
+ :param protocol: the protocol used to test connectivity with.
+ :returns: True, if the connection succeeded and it was expected to
+ succeed. False otherwise.
"""
- def ping_remote():
+ method_name = '%s_check' % protocol
+ connectivity_checker = getattr(source, method_name)
+
+ def connect_remote():
try:
- source.ping_host(dest, nic=nic)
+ connectivity_checker(dest, nic=nic)
except lib_exc.SSHExecCommandFailed:
- LOG.warning('Failed to ping IP: %s via a ssh connection '
- 'from: %s.', dest, source.ssh_client.host)
+ LOG.warning('Failed to check %(protocol)s connectivity for '
+ 'IP %(dest)s via a ssh connection from: %(src)s.',
+ dict(protocol=protocol, dest=dest,
+ src=source.ssh_client.host))
return not should_succeed
return should_succeed
- result = test_utils.call_until_true(ping_remote,
+ result = test_utils.call_until_true(connect_remote,
CONF.validation.ping_timeout, 1)
if result:
return
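
The protocol argument is resolved to a '<protocol>_check' method on the remote client, so ICMP keeps the previous ping behaviour while other protocols can plug in their own checker. A hedged sketch from inside a test method; ssh_source, the destination IP and the exact checker names on the remote client are assumptions:

    # 'icmp' maps to an icmp_check() helper on the RemoteClient; any other
    # value requires a matching <protocol>_check() method to exist.
    self.check_remote_connectivity(ssh_source, dest='10.0.0.5',
                                   should_succeed=True, protocol='icmp')
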
@@ -1159,7 +1311,7 @@
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
@@ -1167,6 +1319,11 @@
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
+ :param port_security_enabled: whether or not port_security is enabled
+ :param net_dict: a dict containing experimental network information in
+ a form like this: {'provider:network_type': 'vlan',
+ 'provider:physical_network': 'foo',
+ 'provider:segmentation_id': '42'}
:returns: network, subnet, router
"""
if CONF.network.shared_physical_network:
@@ -1186,7 +1343,8 @@
network = self._create_network(
networks_client=networks_client,
tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
+ port_security_enabled=port_security_enabled,
+ **net_dict)
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 8c210d5..008d1ae 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -29,11 +29,12 @@
For both LUKS and cryptsetup encryption types, this test performs
the following:
- * Creates an image in Glance
- * Boots an instance from the image
- * Creates an encryption type (as admin)
- * Creates a volume of that encryption type (as a regular user)
- * Attaches and detaches the encrypted volume to the instance
+
+ * Creates an image in Glance
+ * Boots an instance from the image
+ * Creates an encryption type (as admin)
+ * Creates a volume of that encryption type (as a regular user)
+ * Attaches and detaches the encrypted volume to the instance
"""
@classmethod
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
new file mode 100644
index 0000000..e7085f6
--- /dev/null
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
+ credentials = ['primary', 'admin']
+ required_extensions = ['port-resource-request',
+ 'qos',
+ 'qos-bw-minimum-ingress']
+ # The feature QoS minimum bandwidth allocation in Placement API depends on
+ # Granular resource requests to GET /allocation_candidates and Support
+ # allocation candidates with nested resource providers features in
+ # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
+ # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
+ # means that the minimum placement microversion is 1.29
+ placement_min_microversion = '1.29'
+ placement_max_microversion = 'latest'
+
+ # Below microversion 2.72, Nova rejects booting a VM with a port that
+ # has a resource_request field
+ compute_min_microversion = '2.72'
+ compute_max_microversion = 'latest'
+
+ INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+ INGRESS_DIRECTION = 'ingress'
+
+ SMALLEST_POSSIBLE_BW = 1
+ # For any realistic inventory value (that is inventory != MAX_INT) an
+ # allocation candidate request of MAX_INT is expected to be rejected, see:
+ # https://github.com/openstack/placement/blob/master/placement/
+ # db/constants.py#L16
+ PLACEMENT_MAX_INT = 0x7FFFFFFF
+
+ @classmethod
+ def setup_clients(cls):
+ super(MinBwAllocationPlacementTest, cls).setup_clients()
+ cls.placement_client = cls.os_admin.placement_client
+ cls.networks_client = cls.os_admin.networks_client
+ cls.subnets_client = cls.os_admin.subnets_client
+ cls.routers_client = cls.os_adm.routers_client
+ cls.qos_client = cls.os_admin.qos_client
+ cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+
+ @classmethod
+ def skip_checks(cls):
+ super(MinBwAllocationPlacementTest, cls).skip_checks()
+ if not CONF.network_feature_enabled.qos_placement_physnet:
+ msg = "Skipped as no physnet is available in config for " \
+ "placement based QoS allocation."
+ raise cls.skipException(msg)
+
+ def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
+ policy = self.qos_client.create_qos_policy(
+ name=data_utils.rand_name(name_prefix),
+ shared=True)['policy']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.qos_client.delete_qos_policy, policy['id'])
+ rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+ policy['id'],
+ **{
+ 'min_kbps': min_kbps,
+ 'direction': self.INGRESS_DIRECTION
+ })['minimum_bandwidth_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
+ rule['id'])
+
+ return policy
+
+ def _create_qos_policies(self):
+ self.qos_policy_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_valid',
+ min_kbps=self.SMALLEST_POSSIBLE_BW)
+ self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_not_valid',
+ min_kbps=self.PLACEMENT_MAX_INT)
+
+ def _create_network_and_qos_policies(self):
+ physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+ base_segm = \
+ CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+ self.prov_network, _, _ = self.create_networks(
+ networks_client=self.networks_client,
+ routers_client=self.routers_client,
+ subnets_client=self.subnets_client,
+ **{
+ 'shared': True,
+ 'provider:network_type': 'vlan',
+ 'provider:physical_network': physnet_name,
+ 'provider:segmentation_id': base_segm
+ })
+
+ self._create_qos_policies()
+
+ def _check_if_allocation_is_possible(self):
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.SMALLEST_POSSIBLE_BW))
+ if len(alloc_candidates['provider_summaries']) == 0:
+ self.fail('No allocation candidates are available for %s:%s' %
+ (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+
+ # Just to be sure, also check that an impossibly high allocation
+ # (placement max_int) yields no candidates
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.PLACEMENT_MAX_INT))
+ if len(alloc_candidates['provider_summaries']) != 0:
+ self.fail('For %s:%s there should be no available candidate!' %
+ (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+
+ @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_basic(self):
+ """Basic scenario with QoS min bw allocation in placement.
+
+ Steps:
+ * Create prerequisites:
+ ** VLAN type provider network with subnet.
+ ** valid QoS policy with minimum bandwidth rule with min_kbps=1
+ (This is a simplification to skip the checks in placement for
+ detecting the resource provider tree and inventories, as if any
+ bandwidth resource is available, 1 kbps will be available).
+ ** invalid QoS policy with minimum bandwidth rule with
+ min_kbps=max integer from placement (this is a simplification again
+ to avoid detection of RP trees and inventories, as placement will
+ reject such a big allocation).
+ * Create a port with the valid QoS policy and boot a VM with it; this
+ should pass.
+ * Create a port with the invalid QoS policy and try to boot a VM with
+ it; this should fail.
+ """
+
+ self._check_if_allocation_is_possible()
+
+ self._create_network_and_qos_policies()
+
+ valid_port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': valid_port['id']}])
+ allocations = self.placement_client.list_allocations(server1['id'])
+
+ self.assertGreater(len(allocations['allocations']), 0)
+ bw_resource_in_alloc = False
+ for rp, resources in allocations['allocations'].items():
+ if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+ bw_resource_in_alloc = True
+ self.assertTrue(bw_resource_in_alloc)
+
+ # boot another vm with max int bandwidth
+ not_valid_port = self.create_port(
+ self.prov_network['id'],
+ qos_policy_id=self.qos_policy_not_valid['id'])
+ server2 = self.create_server(
+ wait_until=None,
+ networks=[{'port': not_valid_port['id']}])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server2['id'],
+ status='ERROR', ready_wait=False, raise_on_error=False)
+ allocations = self.placement_client.list_allocations(server2['id'])
+
+ self.assertEqual(0, len(allocations['allocations']))
+ server2 = self.servers_client.show_server(server2['id'])
+ self.assertIn('fault', server2['server'])
+ self.assertIn('No valid host', server2['server']['fault']['message'])
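
For reference, the allocation-candidate pre-check above reduces to the following minimal sketch. It assumes a placement client exposing list_allocation_candidates(resources1=...) exactly as the test calls it; the constants mirror the ones defined in the new test class, and allocation_is_possible is a hypothetical helper name.

INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
SMALLEST_POSSIBLE_BW = 1
PLACEMENT_MAX_INT = 0x7FFFFFFF


def allocation_is_possible(placement_client):
    """Return True if a 1 kbps ingress bandwidth request has candidates
    while an impossibly large request (placement max int) has none."""
    small = placement_client.list_allocation_candidates(
        resources1='%s:%s' % (INGRESS_RESOURCE_CLASS, SMALLEST_POSSIBLE_BW))
    if not small['provider_summaries']:
        return False
    huge = placement_client.list_allocation_candidates(
        resources1='%s:%s' % (INGRESS_RESOURCE_CLASS, PLACEMENT_MAX_INT))
    return len(huge['provider_summaries']) == 0
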
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2b35e45..4cd860d 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -48,6 +48,7 @@
10. Check SSH connection to instance after reboot
"""
+
def nova_show(self, server):
got_server = (self.servers_client.show_server(server['id'])
['server'])
@@ -60,7 +61,11 @@
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
- self.assertEqual(volume, got_volume)
+ # Exclude updated_at because of bug 1838202.
+ excluded_keys = ['updated_at']
+ self.assertThat(
+ volume, custom_matchers.MatchesDictExceptForKeys(
+ got_volume, excluded_keys=excluded_keys))
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
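
The cinder_show change compares the created and fetched volume while ignoring updated_at (bug 1838202). A plain-Python sketch of that comparison, independent of the custom_matchers helper whose exact signature is only inferred from the call above:

def dicts_equal_except(expected, observed, excluded_keys=()):
    """Compare two dicts while ignoring the given keys (e.g. 'updated_at')."""
    strip = set(excluded_keys)
    left = {k: v for k, v in expected.items() if k not in strip}
    right = {k: v for k, v in observed.items() if k not in strip}
    return left == right


# Example: tolerate updated_at drifting between create and show.
volume = {'id': 'v1', 'size': 1, 'updated_at': '2019-01-01T00:00:00'}
got_volume = {'id': 'v1', 'size': 1, 'updated_at': '2019-01-01T00:00:05'}
assert dicts_equal_except(volume, got_volume, excluded_keys=['updated_at'])
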
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e94ce3d..b1919d4 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -119,6 +119,7 @@
server, keypair, floating_ip)
@decorators.idempotent_id('7b6860c2-afa3-4846-9522-adeb38dfbe08')
+ @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_server_connectivity_reboot(self):
keypair = self.create_keypair()
@@ -199,7 +200,8 @@
self.assertEqual(resize_flavor, server['flavor']['id'])
else:
flavor = self.flavors_client.show_flavor(resize_flavor)['flavor']
- for key in ['original_name', 'ram', 'vcpus', 'disk']:
+ self.assertEqual(flavor['name'], server['original_name'])
+ for key in ['ram', 'vcpus', 'disk']:
self.assertEqual(flavor[key], server['flavor'][key])
self._wait_server_status_and_check_network_connectivity(
server, keypair, floating_ip)
@@ -230,7 +232,37 @@
self.assertNotEqual(src_host, dst_host)
- @decorators.skip_because(bug='1788403')
+ @decorators.idempotent_id('03fd1562-faad-11e7-9ea0-fa163e65f5ce')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_server_connectivity_live_migration(self):
+ keypair = self.create_keypair()
+ server = self._setup_server(keypair)
+ floating_ip = self._setup_network(server, keypair)
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ block_migration = (CONF.compute_feature_enabled.
+ block_migration_for_live_migration)
+ old_host = self.get_host_for_server(server['id'])
+ self.admin_servers_client.live_migrate_server(
+ server['id'], host=None, block_migration=block_migration,
+ disk_over_commit=False)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ new_host = self.get_host_for_server(server['id'])
+ self.assertNotEqual(old_host, new_host, 'Server did not migrate')
+
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ @decorators.skip_because(bug='1836595')
@decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
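
The new live-migration connectivity test follows a migrate-then-verify pattern. A condensed sketch of that flow, with the admin servers client, waiters module, and host lookup passed in explicitly (the helper name is hypothetical):

def live_migrate_and_verify(admin_servers_client, servers_client, waiters,
                            get_host_for_server, server_id,
                            block_migration=False):
    """Live migrate a server and assert it ended up on a different host."""
    old_host = get_host_for_server(server_id)
    admin_servers_client.live_migrate_server(
        server_id, host=None, block_migration=block_migration,
        disk_over_commit=False)
    waiters.wait_for_server_status(servers_client, server_id, 'ACTIVE')
    new_host = get_host_for_server(server_id)
    if old_host == new_host:
        raise AssertionError('Server did not migrate')
    return new_host
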
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c1132cf..d8584ec 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -292,11 +292,14 @@
% CONF.network.build_timeout)
_, new_nic = self.diff_list[0]
- ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
- new_port['fixed_ips'][0]['ip_address'],
- CONF.network.project_network_mask_bits,
- new_nic))
- ssh_client.exec_command("sudo ip link set %s up" % new_nic)
+ ip_output = ssh_client.exec_command('ip a')
+ ip_address = new_port['fixed_ips'][0]['ip_address']
+ ip_mask = CONF.network.project_network_mask_bits
+ # check whether the address is already configured; if it is not, set it
+ if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
+ ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
+ ip_address, ip_mask, new_nic))
+ ssh_client.exec_command("sudo ip link set %s up" % new_nic)
def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
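
The hunk above only configures the hot-plugged NIC when the address is not already listed by the guest. The substring check it relies on can be exercised in isolation with plain Python (helper name and sample output are illustrative only):

def address_already_configured(ip_output, ip_address, mask_bits):
    """Return True if 'ip a' output already lists ip_address/mask_bits."""
    return ' %s/%s' % (ip_address, mask_bits) in ip_output


sample = """2: eth1: <BROADCAST,MULTICAST,UP> mtu 1500
    inet 10.1.0.5/28 brd 10.1.0.15 scope global eth1"""
assert address_already_configured(sample, '10.1.0.5', 28)
assert not address_already_configured(sample, '10.1.0.6', 28)
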
@@ -343,10 +346,19 @@
network_id=CONF.network.public_network_id)['subnets']
if s['ip_version'] == 4
]
- self.assertEqual(1, len(v4_subnets),
- "Found %d IPv4 subnets" % len(v4_subnets))
- external_ips = [v4_subnets[0]['gateway_ip']]
+ if len(v4_subnets) > 1:
+ self.assertTrue(
+ CONF.network.subnet_id,
+ "Found %d subnets. Specify subnet using configuration "
+ "option [network].subnet_id."
+ % len(v4_subnets))
+ subnet = self.os_admin.subnets_client.show_subnet(
+ CONF.network.subnet_id)['subnet']
+ external_ips = [subnet['gateway_ip']]
+ else:
+ external_ips = [v4_subnets[0]['gateway_ip']]
+
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
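
The gateway-selection logic introduced above can be summarized as the sketch below; show_subnet stands in for the admin subnets client call and the helper name is hypothetical:

def pick_external_gateway(v4_subnets, configured_subnet_id=None,
                          show_subnet=None):
    """Pick the external gateway IP to ping: the single v4 subnet's gateway,
    or the gateway of the subnet selected via [network].subnet_id when the
    public network has several IPv4 subnets."""
    if len(v4_subnets) > 1:
        if not configured_subnet_id:
            raise AssertionError(
                'Found %d subnets. Specify subnet using configuration '
                'option [network].subnet_id.' % len(v4_subnets))
        return show_subnet(configured_subnet_id)['subnet']['gateway_ip']
    return v4_subnets[0]['gateway_ip']
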
@@ -374,39 +386,37 @@
def test_network_basic_ops(self):
"""Basic network operation test
- For a freshly-booted VM with an IP address ("port") on a given
- network:
+ For a freshly-booted VM with an IP address ("port") on a given network:
- the Tempest host can ping the IP address. This implies, but
- does not guarantee (see the ssh check that follows), that the
- VM has been assigned the correct IP address and has
- connectivity to the Tempest host.
+ does not guarantee (see the ssh check that follows), that the
+ VM has been assigned the correct IP address and has
+ connectivity to the Tempest host.
- the Tempest host can perform key-based authentication to an
- ssh server hosted at the IP address. This check guarantees
- that the IP address is associated with the target VM.
+ ssh server hosted at the IP address. This check guarantees
+ that the IP address is associated with the target VM.
- the Tempest host can ssh into the VM via the IP address and
- successfully execute the following:
+ successfully execute the following:
- - ping an external IP address, implying external connectivity.
+ - ping an external IP address, implying external connectivity.
- - ping an external hostname, implying that dns is correctly
- configured.
+ - ping an external hostname, implying that dns is correctly
+ configured.
- - ping an internal IP address, implying connectivity to another
- VM on the same network.
+ - ping an internal IP address, implying connectivity to another
+ VM on the same network.
- detach the floating-ip from the VM and verify that it becomes
- unreachable
+ unreachable
- associate detached floating ip to a new VM and verify connectivity.
- VMs are created with unique keypair so connectivity also asserts that
- floating IP is associated with the new VM instead of the old one
+ VMs are created with unique keypair so connectivity also asserts
+ that floating IP is associated with the new VM instead of the old
+ one
Verifies that floating IP status is updated correctly after each change
-
-
"""
self._setup_network_and_servers()
self._check_public_network_connectivity(should_connect=True)
@@ -445,30 +455,25 @@
def test_connectivity_between_vms_on_different_networks(self):
"""Test connectivity between VMs on different networks
- For a freshly-booted VM with an IP address ("port") on a given
- network:
+ For a freshly-booted VM with an IP address ("port") on a given network:
- the Tempest host can ping the IP address.
-
- the Tempest host can ssh into the VM via the IP address and
- successfully execute the following:
+ successfully execute the following:
- - ping an external IP address, implying external connectivity.
-
- - ping an external hostname, implying that dns is correctly
- configured.
-
- - ping an internal IP address, implying connectivity to another
- VM on the same network.
+ - ping an external IP address, implying external connectivity.
+ - ping an external hostname, implying that dns is correctly
+ configured.
+ - ping an internal IP address, implying connectivity to another
+ VM on the same network.
- Create another network on the same tenant with subnet, create
- an VM on the new network.
+ a VM on the new network.
- - Ping the new VM from previous VM failed since the new network
- was not attached to router yet.
-
- - Attach the new network to the router, Ping the new VM from
- previous VM succeed.
+ - Pinging the new VM from the previous VM fails since the new
+ network is not attached to the router yet.
+ - Attach the new network to the router; pinging the new VM from
+ the previous VM now succeeds.
"""
self._setup_network_and_servers()
@@ -476,9 +481,14 @@
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
self._create_new_network(create_gateway=True)
- self._create_server(self.new_net)
- self._check_network_internal_connectivity(network=self.new_net,
- should_connect=False)
+ new_server = self._create_server(self.new_net)
+ new_server_ips = [addr['addr'] for addr in
+ new_server['addresses'][self.new_net['name']]]
+
+ # Assert that pinging the new VM fails since the new network is not
+ # connected to a router
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ new_server_ips, should_connect=False)
router_id = self.router['id']
self.routers_client.add_router_interface(
router_id, subnet_id=self.new_subnet['id'])
@@ -486,8 +496,9 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.routers_client.remove_router_interface,
router_id, subnet_id=self.new_subnet['id'])
- self._check_network_internal_connectivity(network=self.new_net,
- should_connect=True)
+
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ new_server_ips, should_connect=True)
@decorators.idempotent_id('c5adff73-e961-41f1-b4a9-343614f18cfa')
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
@@ -555,34 +566,42 @@
def test_subnet_details(self):
"""Tests that subnet's extra configuration details are affecting VMs.
- This test relies on non-shared, isolated tenant networks.
+ This test relies on non-shared, isolated tenant networks.
- NOTE: Neutron subnets push data to servers via dhcp-agent, so any
- update in subnet requires server to actively renew its DHCP lease.
+ NOTE: Neutron subnets push data to servers via dhcp-agent, so any
+ update in subnet requires server to actively renew its DHCP lease.
- 1. Configure subnet with dns nameserver
- 2. retrieve the VM's configured dns and verify it matches the one
- configured for the subnet.
- 3. update subnet's dns
- 4. retrieve the VM's configured dns and verify it matches the new one
- configured for the subnet.
+ 1. Configure subnet with dns nameserver
+ 2. retrieve the VM's configured dns and verify it matches the one
+ configured for the subnet.
+ 3. update subnet's dns
+ 4. retrieve the VM's configured dns and verify it matches the new one
+ configured for the subnet.
- TODO(yfried): add host_routes
+ TODO(yfried): add host_routes
- any resolution check would be testing either:
- * l3 forwarding (tested in test_network_basic_ops)
- * Name resolution of an external DNS nameserver - out of scope for
- Tempest
+ any resolution check would be testing either:
+
+ * l3 forwarding (tested in test_network_basic_ops)
+ * Name resolution of an external DNS nameserver - out of scope for
+ Tempest
"""
# this test check only updates (no actual resolution) so using
# arbitrary ip addresses as nameservers, instead of parsing CONF
initial_dns_server = '1.2.3.4'
alt_dns_server = '9.8.7.6'
- # renewal should be immediate.
- # Timeouts are suggested by salvatore-orlando in
+ # Original timeouts are suggested by salvatore-orlando in
# https://bugs.launchpad.net/neutron/+bug/1412325/comments/3
- renew_delay = CONF.network.build_interval
+ #
+ # Compared to that renew_delay was increased, because
+ # busybox's udhcpc accepts SIGUSR1 as a renew request. Internally
+ # it goes into RENEW_REQUESTED state. If it receives a 2nd SIGUSR1
+ # signal while in that state then it calls the deconfig script
+ # ("/sbin/cirros-dhcpc deconfig" in sufficiently new cirros versions)
+ # which leads to the address being transiently deconfigured, which
+ # is unwanted in our case.
+ renew_delay = 3 * CONF.network.build_interval
renew_timeout = CONF.network.build_timeout
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
@@ -742,7 +761,7 @@
2. Remove router from all l3-agents
3. Verify connectivity is down
4. Assign router to new l3-agent (or old one if no new agent is
- available)
+ available)
5. Verify connectivity
"""
@@ -823,7 +842,8 @@
prevents traffic to pass through the VM. Anti-spoof rules are not
required in cases where the VM routes traffic through it.
- The test steps are :
+ The test steps are:
+
1. Create a new network.
2. Connect (hotplug) the VM to a new network.
3. Check the VM can ping a server on the new network ("peer")
@@ -832,7 +852,7 @@
spoofed interface (VM cannot ping the peer).
6. Disable port-security of the spoofed port- set the flag to false.
7. Retest 3rd step and check that the Security Group allows pings via
- the spoofed interface.
+ the spoofed interface.
"""
spoof_mac = "00:00:00:00:00:01"
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index e4e39c3..8de6614 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -12,13 +12,18 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+from oslo_log import log as logging
+
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+from tempest.lib import exceptions
from tempest.scenario import manager
CONF = config.CONF
+LOG = logging.getLogger(__name__)
class TestGettingAddress(manager.NetworkScenarioTest):
@@ -154,8 +159,32 @@
% (network_id, ports))
mac6 = ports[0]
nic = ssh.get_nic_name_by_mac(mac6)
+ # NOTE(slaweq): on RHEL based OS ifcfg file for new interface is
+ # needed to make IPv6 working on it, so if
+ # /etc/sysconfig/network-scripts directory exists ifcfg-%(nic)s file
+ # should be added in it
+ if self._sysconfig_network_scripts_dir_exists(ssh):
+ try:
+ ssh.exec_command(
+ 'echo -e "DEVICE=%(nic)s\\nNAME=%(nic)s\\nIPV6INIT=yes" | '
+ 'sudo tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
+ 'sudo nmcli connection reload' % {'nic': nic})
+ ssh.exec_command('sudo nmcli connection up %s' % nic)
+ except exceptions.SSHExecCommandFailed as e:
+ # NOTE(slaweq): Sometimes this SSH command can fail because of
+ # an error from the network manager in the guest OS.
+ # Even then, running "ip link set up" below is fine and the
+ # IP address should be configured properly.
+ LOG.debug("Error during restarting %(nic)s interface on "
+ "instance. Error message: %(error)s",
+ {'nic': nic, 'error': e})
ssh.exec_command("sudo ip link set %s up" % nic)
+ def _sysconfig_network_scripts_dir_exists(self, ssh):
+ return "False" not in ssh.exec_command(
+ 'test -d /etc/sysconfig/network-scripts/ || echo "False"')
+
def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
net_list = self.prepare_network(address6_mode=address6_mode,
n_subnets6=n_subnets6,
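
The RHEL/ifcfg workaround above amounts to writing a three-line ifcfg file and reloading the connection via nmcli. A sketch of just the command construction, with the NIC name as a placeholder:

def build_ifcfg_commands(nic):
    """Build the shell commands used above to enable IPv6 on a hot-plugged
    NIC on RHEL-based guests (ifcfg file plus an nmcli reload/up)."""
    write_cmd = (
        'echo -e "DEVICE=%(nic)s\\nNAME=%(nic)s\\nIPV6INIT=yes" | '
        'sudo tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
        'sudo nmcli connection reload' % {'nic': nic})
    up_cmd = 'sudo nmcli connection up %s' % nic
    return write_cmd, up_cmd


write_cmd, up_cmd = build_ifcfg_commands('eth1')
assert 'ifcfg-eth1' in write_cmd and up_cmd.endswith('eth1')
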
@@ -252,6 +281,7 @@
dualnet=True)
@decorators.idempotent_id('9178ad42-10e4-47e9-8987-e02b170cc5cd')
+ @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_dualnet_multi_prefix_slaac(self):
self._prepare_and_test(address6_mode='slaac', n_subnets6=2,
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index cbe321e..b635ca0 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -24,15 +24,15 @@
def test_swift_basic_ops(self):
"""Test swift basic ops.
- * get swift stat.
- * create container.
- * upload a file to the created container.
- * list container's objects and assure that the uploaded file is
- present.
- * download the object and check the content
- * delete object from container.
- * list container's objects and assure that the deleted file is gone.
- * delete a container.
+ * get swift stat.
+ * create container.
+ * upload a file to the created container.
+ * list container's objects and assure that the uploaded file is
+ present.
+ * download the object and check the content
+ * delete object from container.
+ * list container's objects and assure that the deleted file is gone.
+ * delete a container.
"""
self.get_swift_stat()
container_name = self.create_container()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 28a2d64..9cbd831 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -63,28 +63,28 @@
a. a security group open to incoming ssh connection
b. a VM with a floating ip
5. create a general empty security group (same as "default", but
- without rules allowing in-tenant traffic)
+ without rules allowing in-tenant traffic)
tests:
1. _verify_network_details
2. _verify_mac_addr: for each access point verify that
- (subnet, fix_ip, mac address) are as defined in the port list
+ (subnet, fix_ip, mac address) are as defined in the port list
3. _test_in_tenant_block: test that in-tenant traffic is disabled
- without rules allowing it
+ without rules allowing it
4. _test_in_tenant_allow: test that in-tenant traffic is enabled
- once an appropriate rule has been created
+ once an appropriate rule has been created
5. _test_cross_tenant_block: test that cross-tenant traffic is disabled
- without a rule allowing it on destination tenant
+ without a rule allowing it on destination tenant
6. _test_cross_tenant_allow:
* test that cross-tenant traffic is enabled once an appropriate
- rule has been created on destination tenant.
+ rule has been created on destination tenant.
* test that reverse traffic is still blocked
* test than reverse traffic is enabled once an appropriate rule has
- been created on source tenant
- 7._test_port_update_new_security_group:
- * test that traffic is blocked with default security group
- * test that traffic is enabled after updating port with new security
- group having appropriate rule
+ been created on source tenant
+ 7. _test_port_update_new_security_group:
+ * test that traffic is blocked with default security group
+ * test that traffic is enabled after updating port with new
+ security group having appropriate rule
8. _test_multiple_security_groups: test multiple security groups can be
associated with the vm
@@ -93,11 +93,13 @@
2. Public network is defined and reachable from the Tempest host
3. Public router can either be:
* defined, in which case all tenants networks can connect directly
- to it, and cross tenant check will be done on the private IP of the
- destination tenant
+ to it, and cross tenant check will be done on the private IP of
+ the destination tenant
+
or
+
* not defined (empty string), in which case each tenant will have
- its own router connected to the public network
+ its own router connected to the public network
"""
credentials = ['primary', 'alt', 'admin']
@@ -393,24 +395,22 @@
self.check_remote_connectivity(source=access_point_ssh,
dest=self._get_server_ip(server))
- def _test_cross_tenant_block(self, source_tenant, dest_tenant):
+ def _test_cross_tenant_block(self, source_tenant, dest_tenant, ruleset):
# if public router isn't defined, then dest_tenant access is via
# floating-ip
+ protocol = ruleset['protocol']
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
self.check_remote_connectivity(source=access_point_ssh, dest=ip,
- should_succeed=False)
+ should_succeed=False, protocol=protocol)
- def _test_cross_tenant_allow(self, source_tenant, dest_tenant):
+ def _test_cross_tenant_allow(self, source_tenant, dest_tenant, ruleset):
"""check for each direction:
creating rule for tenant incoming traffic enables only 1way traffic
"""
- ruleset = dict(
- protocol='icmp',
- direction='ingress'
- )
+ protocol = ruleset['protocol']
sec_group_rules_client = (
dest_tenant.manager.security_group_rules_client)
self._create_security_group_rule(
@@ -421,10 +421,10 @@
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
- self.check_remote_connectivity(access_point_ssh, ip)
+ self.check_remote_connectivity(access_point_ssh, ip, protocol=protocol)
# test that reverse traffic is still blocked
- self._test_cross_tenant_block(dest_tenant, source_tenant)
+ self._test_cross_tenant_block(dest_tenant, source_tenant, ruleset)
# allow reverse traffic and check
sec_group_rules_client = (
@@ -438,7 +438,8 @@
access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
ip = self._get_server_ip(source_tenant.access_point,
floating=self.floating_ip_access)
- self.check_remote_connectivity(access_point_ssh_2, ip)
+ self.check_remote_connectivity(access_point_ssh_2, ip,
+ protocol=protocol)
def _verify_mac_addr(self, tenant):
"""Verify that VM has the same ip, mac as listed in port"""
@@ -468,6 +469,17 @@
self._log_console_output(
servers=[tenant.access_point], client=client)
+ def _create_protocol_ruleset(self, protocol, port=80):
+ if protocol == 'icmp':
+ ruleset = dict(protocol='icmp',
+ direction='ingress')
+ else:
+ ruleset = dict(protocol=protocol,
+ port_range_min=port,
+ port_range_max=port,
+ direction='ingress')
+ return ruleset
+
@decorators.idempotent_id('e79f879e-debb-440c-a7e4-efeda05b6848')
@utils.services('compute', 'network')
def test_cross_tenant_traffic(self):
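
Since _create_protocol_ruleset only varies the port range between ICMP and TCP/UDP, its behaviour can be illustrated standalone (module-level copy for demonstration only):

def create_protocol_ruleset(protocol, port=80):
    """Build the security-group rule parameters used for the connectivity
    checks: ICMP needs no ports, TCP/UDP pin a single port."""
    if protocol == 'icmp':
        return dict(protocol='icmp', direction='ingress')
    return dict(protocol=protocol, port_range_min=port,
                port_range_max=port, direction='ingress')


assert create_protocol_ruleset('icmp') == {'protocol': 'icmp',
                                           'direction': 'ingress'}
assert create_protocol_ruleset('tcp')['port_range_min'] == 80
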
@@ -482,8 +494,18 @@
# cross tenant check
source_tenant = self.primary_tenant
dest_tenant = self.alt_tenant
- self._test_cross_tenant_block(source_tenant, dest_tenant)
- self._test_cross_tenant_allow(source_tenant, dest_tenant)
+
+ protocol = CONF.scenario.protocol
+ LOG.debug("Testing cross tenant traffic for %s protocol",
+ protocol)
+ if protocol in ['udp', 'tcp']:
+ for tenant in [source_tenant, dest_tenant]:
+ access_point = self._connect_to_access_point(tenant)
+ access_point.nc_listen_host(protocol=protocol)
+
+ ruleset = self._create_protocol_ruleset(protocol)
+ self._test_cross_tenant_block(source_tenant, dest_tenant, ruleset)
+ self._test_cross_tenant_allow(source_tenant, dest_tenant, ruleset)
except Exception:
self._log_console_output_for_all_tenants()
raise
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 1671216..02bc692 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+from oslo_serialization import jsonutils as json
from tempest.common import utils
from tempest.common import waiters
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 2782119..c3b3670 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -55,22 +55,25 @@
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- def _wait_for_volume_available_on_the_system(self, ip_address,
- private_key):
+ def _attached_volume_name(
+ self, disks_list_before_attach, ip_address, private_key):
ssh = self.get_remote_client(ip_address, private_key=private_key)
- def _func():
- disks = ssh.get_disks()
- LOG.debug("Disks: %s", disks)
- return CONF.compute.volume_device_name in disks
+ def _wait_for_volume_available_on_system():
+ disks_list_after_attach = ssh.list_disks()
+ return len(disks_list_after_attach) > len(disks_list_before_attach)
- if not test_utils.call_until_true(_func,
+ if not test_utils.call_until_true(_wait_for_volume_available_on_system,
CONF.compute.build_timeout,
CONF.compute.build_interval):
raise lib_exc.TimeoutException
+ disks_list_after_attach = ssh.list_disks()
+ volume_name = [item for item in disks_list_after_attach
+ if item not in disks_list_before_attach][0]
+ return volume_name
+
@decorators.attr(type='slow')
- @decorators.skip_because(bug="1664793")
@decorators.idempotent_id('10fd234a-515c-41e5-b092-8323060598c5')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
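
The reworked helper detects the attached volume by diffing the guest's disk list before and after the attach. A plain sketch of that set difference (in the test the comparison is polled with call_until_true):

def attached_volume_name(disks_before, disks_after):
    """Return the device that appeared after attaching the volume."""
    new_disks = [d for d in disks_after if d not in disks_before]
    if not new_disks:
        raise AssertionError('No new disk appeared after attach')
    return new_disks[0]


assert attached_volume_name(['vda'], ['vda', 'vdb']) == 'vdb'
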
@@ -91,11 +94,17 @@
# create and add floating IP to server1
ip_for_server = self.get_server_ip(server)
+ # Make sure the machine is ssh-able before attaching the volume
+ linux_client = self.get_remote_client(
+ ip_for_server, private_key=keypair['private_key'],
+ server=server)
+ disks_list_before_attach = linux_client.list_disks()
self.nova_volume_attach(server, volume)
- self._wait_for_volume_available_on_the_system(ip_for_server,
- keypair['private_key'])
+ volume_device_name = self._attached_volume_name(
+ disks_list_before_attach, ip_for_server, keypair['private_key'])
+
timestamp = self.create_timestamp(ip_for_server,
- CONF.compute.volume_device_name,
+ volume_device_name,
private_key=keypair['private_key'],
server=server)
self.nova_volume_detach(server, volume)
@@ -119,14 +128,22 @@
# create and add floating IP to server_from_snapshot
ip_for_snapshot = self.get_server_ip(server_from_snapshot)
+ # Make sure the machine is ssh-able before attaching the volume;
+ # only a live machine responds to device attach/detach
+ # as expected
+ linux_client = self.get_remote_client(
+ ip_for_snapshot, private_key=keypair['private_key'],
+ server=server_from_snapshot)
+ disks_list_before_attach = linux_client.list_disks()
+
# attach volume2 to instance2
self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
- self._wait_for_volume_available_on_the_system(ip_for_snapshot,
- keypair['private_key'])
+ volume_device_name = self._attached_volume_name(
+ disks_list_before_attach, ip_for_snapshot, keypair['private_key'])
# check the existence of the timestamp file in the volume2
timestamp2 = self.get_timestamp(ip_for_snapshot,
- CONF.compute.volume_device_name,
+ volume_device_name,
private_key=keypair['private_key'],
server=server_from_snapshot)
self.assertEqual(timestamp, timestamp2)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 810480b..3b4bbda 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -11,7 +11,7 @@
# under the License.
from oslo_log import log as logging
-from oslo_serialization import jsonutils
+from oslo_serialization import jsonutils as json
import testtools
from tempest.common import utils
@@ -31,42 +31,6 @@
# breathing room to get through deletes in the time allotted.
TIMEOUT_SCALING_FACTOR = 2
- def _create_volume_from_image(self):
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _get_bdm(self, source_id, source_type, delete_on_termination=False):
- bd_map_v2 = [{
- 'uuid': source_id,
- 'source_type': source_type,
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': delete_on_termination}]
- return {'block_device_mapping_v2': bd_map_v2}
-
- def _boot_instance_from_resource(self, source_id,
- source_type,
- keypair=None,
- security_group=None,
- delete_on_termination=False,
- name=None):
- create_kwargs = dict()
- if keypair:
- create_kwargs['key_name'] = keypair['name']
- if security_group:
- create_kwargs['security_groups'] = [
- {'name': security_group['name']}]
- create_kwargs.update(self._get_bdm(
- source_id,
- source_type,
- delete_on_termination=delete_on_termination))
- if name:
- create_kwargs['name'] = name
-
- return self.create_server(image_id='', **create_kwargs)
-
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
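
The private helpers removed here are replaced by create_volume_from_image/boot_instance_from_resource called on self, presumably now provided by the shared scenario manager. The block-device mapping they built is unchanged and is sketched below for reference:

def get_bdm_v2(source_id, source_type, delete_on_termination=False):
    """Build the block_device_mapping_v2 payload for boot-from-volume or
    boot-from-snapshot servers, matching the helper removed above."""
    bd_map_v2 = [{
        'uuid': source_id,
        'source_type': source_type,        # 'volume' or 'snapshot'
        'destination_type': 'volume',
        'boot_index': 0,
        'delete_on_termination': delete_on_termination}]
    return {'block_device_mapping_v2': bd_map_v2}


kwargs = get_bdm_v2('11111111-2222-3333-4444-555555555555', 'volume')
assert kwargs['block_device_mapping_v2'][0]['boot_index'] == 0
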
@@ -76,6 +40,7 @@
self.snapshots_client.wait_for_resource_deletion(snapshot_id)
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
+ @decorators.attr(type='slow')
# Note: This test is being skipped based on 'public_network_id'.
# It is being used in create_floating_ip() method which gets called
# from get_server_ip() method
@@ -85,7 +50,6 @@
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
-
"""This test case attempts to reproduce the following steps:
* Create in Cinder some bootable volume importing a Glance image
@@ -104,8 +68,8 @@
# create an instance from volume
LOG.info("Booting instance 1 from volume")
- volume_origin = self._create_volume_from_image()
- instance_1st = self._boot_instance_from_resource(
+ volume_origin = self.create_volume_from_image()
+ instance_1st = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -124,7 +88,7 @@
self._delete_server(instance_1st)
# create a 2nd instance from volume
- instance_2nd = self._boot_instance_from_resource(
+ instance_2nd = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -149,10 +113,10 @@
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
server_from_snapshot = (
- self._boot_instance_from_resource(source_id=volume['id'],
- source_type='volume',
- keypair=keypair,
- security_group=security_group))
+ self.boot_instance_from_resource(source_id=volume['id'],
+ source_type='volume',
+ keypair=keypair,
+ security_group=security_group))
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
@@ -171,13 +135,13 @@
@utils.services('compute', 'image', 'volume')
def test_create_server_from_volume_snapshot(self):
# Create a volume from an image
- boot_volume = self._create_volume_from_image()
+ boot_volume = self.create_volume_from_image()
# Create a snapshot
boot_snapshot = self.create_volume_snapshot(boot_volume['id'])
# Create a server from a volume snapshot
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=boot_snapshot['id'],
source_type='snapshot',
delete_on_termination=True)
@@ -203,16 +167,23 @@
self.assertEqual(created_volume[0]['id'],
created_volume_info['attachments'][0]['volume_id'])
+ # Delete the server and wait
+ self._delete_server(server)
+
+ # Assert that the underlying volume is gone before class tearDown
+ # to prevent snapshot deletion from failing
+ self.volumes_client.wait_for_resource_deletion(created_volume[0]['id'])
+
@decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_image_defined_boot_from_volume(self):
# create an instance from image-backed volume
- volume_origin = self._create_volume_from_image()
+ volume_origin = self.create_volume_from_image()
name = data_utils.rand_name(self.__class__.__name__ +
'-volume-backed-server')
- instance1 = self._boot_instance_from_resource(
+ instance1 = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
delete_on_termination=True,
@@ -264,7 +235,7 @@
bdms = image.get('block_device_mapping')
if not bdms:
bdms = image['properties']['block_device_mapping']
- bdms = jsonutils.loads(bdms)
+ bdms = json.loads(bdms)
snapshot_id = bdms[0]['snapshot_id']
self._delete_snapshot(snapshot_id)
@@ -281,14 +252,13 @@
@utils.services('compute', 'volume')
def test_boot_server_from_encrypted_volume_luks(self):
# Create an encrypted volume
- volume = self.create_encrypted_volume('nova.volume.encryptors.'
- 'luks.LuksEncryptor',
+ volume = self.create_encrypted_volume('luks',
volume_type='luks')
self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
# Boot a server from the encrypted volume
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=volume['id'],
source_type='volume',
delete_on_termination=False)
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index c54bb38..106500e 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -33,6 +33,9 @@
* Write to the volume
* Perform a cinder retype --on-demand of the volume to type of backend #2
* Check written content of migrated volume
+ * Check the type of the volume has been updated.
+ * Check the volume is still in-use and the migration was successful.
+ * Check that the same volume is attached to the instance.
"""
credentials = ['primary', 'admin']
@@ -78,7 +81,8 @@
'src_backend': backend_source,
'dst': dest_body['name'],
'dst_backend': backend_dest})
- return source_body['name'], dest_body['name']
+ return ({'name': source_body['name'], 'host': backend_source},
+ {'name': dest_body['name'], 'host': backend_dest})
def _volume_retype_with_migration(self, volume_id, new_volume_type):
# NOTE: The 'on-demand' migration requires admin operation, so
@@ -93,7 +97,7 @@
@decorators.attr(type='slow')
@decorators.idempotent_id('deadd2c2-beef-4dce-98be-f86765ff311b')
@utils.services('compute', 'volume')
- def test_volume_migrate_attached(self):
+ def test_volume_retype_attached(self):
LOG.info("Creating keypair and security group")
keypair = self.create_keypair()
security_group = self._create_security_group()
@@ -104,11 +108,11 @@
# create an instance from volume
LOG.info("Booting instance from volume")
- volume_origin = self.create_volume(imageRef=CONF.compute.image_ref,
- volume_type=source_type)
+ volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
+ volume_type=source_type['name'])['id']
- instance = self._boot_instance_from_volume(volume_origin['id'],
- keypair, security_group)
+ instance = self._boot_instance_from_volume(volume_id, keypair,
+ security_group)
# write content to volume on instance
LOG.info("Setting timestamp in instance %s", instance['id'])
@@ -118,9 +122,11 @@
server=instance)
# retype volume with migration from backend #1 to backend #2
- LOG.info("Retyping Volume %s to new type %s", volume_origin['id'],
- dest_type)
- self._volume_retype_with_migration(volume_origin['id'], dest_type)
+ LOG.info("Retyping Volume %s to new type %s", volume_id,
+ dest_type['name'])
+ # This method retypes the volume and then calls a waiter that
+ # asserts the volume type has changed successfully.
+ self._volume_retype_with_migration(volume_id, dest_type['name'])
# check the content of written file
LOG.info("Getting timestamp in postmigrated instance %s",
@@ -129,3 +135,82 @@
private_key=keypair['private_key'],
server=instance)
self.assertEqual(timestamp, timestamp2)
+
+ # Assert that the volume is on the new host, is still in-use and has a
+ # migration_status of success
+ volume = self.admin_volumes_client.show_volume(volume_id)['volume']
+ # dest_type is host@backend, os-vol-host-attr:host is host@backend#type
+ self.assertIn(dest_type['host'], volume['os-vol-host-attr:host'])
+ self.assertEqual('in-use', volume['status'])
+ self.assertEqual('success', volume['migration_status'])
+
+ # Assert that the same volume id is attached to the instance, ensuring
+ # the os-migrate_volume_completion Cinder API has been called.
+ attached_volumes = self.servers_client.list_volume_attachments(
+ instance['id'])['volumeAttachments']
+ self.assertEqual(volume_id, attached_volumes[0]['id'])
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('fe47b1ed-640e-4e3b-a090-200e25607362')
+ @utils.services('compute', 'volume')
+ def test_volume_migrate_attached(self):
+ LOG.info("Creating keypair and security group")
+ keypair = self.create_keypair()
+ security_group = self._create_security_group()
+
+ LOG.info("Creating volume")
+ # Create a unique volume type to avoid using the backend default
+ migratable_type = self.create_volume_type()['name']
+ volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
+ volume_type=migratable_type)['id']
+ volume = self.admin_volumes_client.show_volume(volume_id)
+
+ LOG.info("Booting instance from volume")
+ instance = self._boot_instance_from_volume(volume_id, keypair,
+ security_group)
+
+ # Identify the source and destination hosts for the migration
+ src_host = volume['volume']['os-vol-host-attr:host']
+
+ # Select the first c-vol host that isn't hosting the volume as the dest.
+ # host['host_name'] should take the format of host@backend.
+ # src_host should take the format of host@backend#type
+ hosts = self.admin_volumes_client.list_hosts()['hosts']
+ for host in hosts:
+ if (host['service'] == 'cinder-volume' and
+ not src_host.startswith(host['host_name'])):
+ dest_host = host['host_name']
+ break
+
+ ip_instance = self.get_server_ip(instance)
+ timestamp = self.create_timestamp(ip_instance,
+ private_key=keypair['private_key'],
+ server=instance)
+
+ LOG.info("Migrating Volume %s from host %s to host %s",
+ volume_id, src_host, dest_host)
+ self.admin_volumes_client.migrate_volume(volume_id, host=dest_host)
+
+ # This waiter asserts that the migration_status is success and that
+ # the volume has moved to the dest_host
+ waiters.wait_for_volume_migration(self.admin_volumes_client, volume_id,
+ dest_host)
+
+ # check the content of written file
+ LOG.info("Getting timestamp in postmigrated instance %s",
+ instance['id'])
+ timestamp2 = self.get_timestamp(ip_instance,
+ private_key=keypair['private_key'],
+ server=instance)
+ self.assertEqual(timestamp, timestamp2)
+
+ # Assert that the volume is in-use
+ volume = self.admin_volumes_client.show_volume(volume_id)['volume']
+ self.assertEqual('in-use', volume['status'])
+
+ # Assert that the same volume id is attached to the instance, ensuring
+ # the os-migrate_volume_completion Cinder API has been called
+ attached_volumes = self.servers_client.list_volume_attachments(
+ instance['id'])['volumeAttachments']
+ attached_volume_id = attached_volumes[0]['id']
+ self.assertEqual(volume_id, attached_volume_id)
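
Both new retype/migrate assertions lean on Cinder's host naming: a backend is host@backend while a volume's os-vol-host-attr:host is host@backend#pool. A small sketch of the containment and prefix checks used above (helper names are illustrative):

def volume_moved_to(volume_host_attr, dest_backend):
    """True if the volume's os-vol-host-attr:host (host@backend#pool)
    belongs to the destination backend (host@backend)."""
    return dest_backend in volume_host_attr


def is_other_backend(candidate_host, src_host_attr):
    """True if candidate_host (host@backend) is not the backend currently
    hosting the volume (src_host_attr is host@backend#pool)."""
    return not src_host_attr.startswith(candidate_host)


assert volume_moved_to('node2@lvm2#lvm2', 'node2@lvm2')
assert is_other_backend('node2@lvm2', 'node1@lvm1#lvm1')
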
diff --git a/tempest/test.py b/tempest/test.py
index f2babbb..f383bc1 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -27,7 +27,9 @@
from tempest.common import credentials_factory as credentials
from tempest.common import utils
from tempest import config
+from tempest.lib import base as lib_base
from tempest.lib.common import fixed_network
+from tempest.lib.common import profiler
from tempest.lib.common import validation_resources as vr
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -147,11 +149,25 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
# All the configuration checks that may generate a skip
- cls.skip_checks()
- if not cls.__skip_checks_called:
- raise RuntimeError("skip_checks for %s did not call the super's "
- "skip_checks" % cls.__name__)
+ # TODO(gmann): cls.handle_skip_exception is really a workaround for
+ # a testtools bug - https://github.com/testing-cabal/testtools/issues/272
+ # stestr, which Tempest uses internally to run the tests, switches to
+ # the customized test runner (which uses stdlib unittest) for >=py3.5,
+ # else testtools.run - https://github.com/mtreinish/stestr/pull/265
+ # These two test runners are not compatible due to skip exception
+ # handling (because of unittest2): testtools.run treats unittest.SkipTest
+ # as an error, and stdlib unittest treats unittest2.case.SkipTest, raised
+ # by testtools.TestCase.skipException, as an error.
+ # The workaround below can be removed once testtools fixes issue #272.
+ orig_skip_exception = testtools.TestCase.skipException
+ lib_base._handle_skip_exception()
try:
+ cls.skip_checks()
+
+ if not cls.__skip_checks_called:
+ raise RuntimeError(
+ "skip_checks for %s did not call the super's "
+ "skip_checks" % cls.__name__)
# Allocation of all required credentials and client managers
cls._teardowns.append(('credentials', cls.clear_credentials))
cls.setup_credentials()
@@ -172,6 +188,8 @@
six.reraise(etype, value, trace)
finally:
del trace # to avoid circular refs
+ finally:
+ testtools.TestCase.skipException = orig_skip_exception
@classmethod
def tearDownClass(cls):
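
The workaround wraps skip_checks in a save/patch/restore of testtools.TestCase.skipException; lib_base._handle_skip_exception is a Tempest-internal hook, and this sketch assumes it simply swaps the class over to the stdlib SkipTest, as the surrounding save/restore implies:

import unittest

import testtools


def run_with_unified_skip_exception(func):
    """Temporarily make testtools raise the stdlib SkipTest so whichever
    runner is active recognizes skips, then restore the original class."""
    orig_skip_exception = testtools.TestCase.skipException
    # Assumption: this mirrors what the Tempest-internal hook does.
    testtools.TestCase.skipException = unittest.case.SkipTest
    try:
        return func()
    finally:
        testtools.TestCase.skipException = orig_skip_exception
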
@@ -231,6 +249,9 @@
if CONF.pause_teardown:
BaseTestCase.insert_pdb_breakpoint()
+ if CONF.profiler.key:
+ profiler.disable()
+
@classmethod
def insert_pdb_breakpoint(cls):
"""Add pdb breakpoint.
@@ -259,6 +280,7 @@
based on the result of an API call are discouraged.
The following checks are implemented in `test.py` already:
+
- check that alt credentials are available when requested by the test
- check that admin credentials are available when requested by the test
- check that the identity version specified by the test is marked as
@@ -310,6 +332,7 @@
`os_[type]`:
Valid values in `credentials` are:
+
- 'primary':
A normal user is provisioned.
It can be used only once. Multiple entries will be ignored.
@@ -581,7 +604,7 @@
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.__setupclass_called:
- raise RuntimeError("setUpClass does not calls the super's"
+ raise RuntimeError("setUpClass does not call the super's "
"setUpClass in the " +
self.__class__.__name__)
at_exit_set.add(self.__class__)
@@ -606,6 +629,8 @@
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=self.log_format,
level=None))
+ if CONF.profiler.key:
+ profiler.enable(CONF.profiler.key)
@property
def credentials_provider(self):
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 9c18052..b20b60e 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -179,6 +179,7 @@
This class is used to manage the lifecycle of external tempest test
plugins. It provides functions for getting set
"""
+
def __init__(self):
self.ext_plugins = stevedore.ExtensionManager(
'tempest.test_plugins', invoke_on_load=True,
@@ -193,11 +194,14 @@
def get_plugin_load_tests_tuple(self):
load_tests_dict = {}
for plug in self.ext_plugins:
+ LOG.info('Loading tests from Tempest plugin: %s', plug.name)
load_tests_dict[plug.name] = plug.obj.load_tests()
return load_tests_dict
def register_plugin_opts(self, conf):
for plug in self.ext_plugins:
+ LOG.info('Register additional config options from Tempest '
+ 'plugin: %s', plug.name)
try:
plug.obj.register_opts(conf)
except Exception:
@@ -208,6 +212,9 @@
plugin_options = []
for plug in self.ext_plugins:
opt_list = plug.obj.get_opt_lists()
+ LOG.info('List additional config options registered by '
+ 'Tempest plugin: %s', plug.name)
+
if opt_list:
plugin_options.extend(opt_list)
return plugin_options
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 330f370..143c6e1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -37,7 +37,7 @@
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
- top_level_dir=base_path))
+ top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index fd9af08..a962e37 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -28,7 +28,6 @@
self.os_username = 'fake_user'
self.os_password = 'fake_password'
self.os_project_name = 'fake_project_name'
- self.os_tenant_name = None
self.os_domain_name = 'fake_domain'
self.tag = 'fake'
self.concurrency = 2
@@ -100,13 +99,6 @@
self.assertEqual(self.opts.os_password, admin_creds.password)
self.assertFalse(hasattr(admin_creds, 'domain_name'))
- def test_get_credential_provider_with_tenant(self):
- self.opts.os_project_name = None
- self.opts.os_tenant_name = 'fake_tenant'
- cp = account_generator.get_credential_provider(self.opts)
- admin_creds = cp.default_admin_creds
- self.assertEqual(self.opts.os_tenant_name, admin_creds.tenant_name)
-
class TestAccountGeneratorV3(TestAccountGeneratorV2):
@@ -208,9 +200,9 @@
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
- # all options on, expect six credentials
- self.assertEqual(6, len(resources))
- # Ensure create_user was invoked 6 times (6 distinct users)
+ # all options on, expect five credentials
+ self.assertEqual(5, len(resources))
+ # Ensure create_user was invoked 5 times (5 distinct users)
self.assertEqual(5, self.user_create_fixture.mock.call_count)
self.assertIn('primary', resource_types)
self.assertIn('alt', resource_types)
@@ -222,6 +214,30 @@
self.assertIsNotNone(resource[1].router)
self.assertIsNotNone(resource[1].subnet)
+ def test_generate_resources_swift_no_admin(self):
+ cfg.CONF.set_default('swift', True, group='service_available')
+ cfg.CONF.set_default('operator_role', 'fake_operator',
+ group='object-storage')
+ cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
+ group='object-storage')
+ resources = account_generator.generate_resources(
+ self.cred_provider, admin=False)
+ resource_types = [k for k, _ in resources]
+ # No Admin, swift, expect four credentials only
+ self.assertEqual(4, len(resources))
+ # Ensure create_user was invoked 4 times (4 distinct users)
+ self.assertEqual(4, self.user_create_fixture.mock.call_count)
+ self.assertIn('primary', resource_types)
+ self.assertIn('alt', resource_types)
+ self.assertNotIn('admin', resource_types)
+ self.assertIn(['fake_operator'], resource_types)
+ self.assertIn(['fake_reseller'], resource_types)
+ self.assertNotIn(['fake_owner'], resource_types)
+ for resource in resources:
+ self.assertIsNotNone(resource[1].network)
+ self.assertIsNotNone(resource[1].router)
+ self.assertIsNotNone(resource[1].subnet)
+
class TestGenerateResourcesV3(TestGenerateResourcesV2):
@@ -267,14 +283,14 @@
# Ordered args in [0], keyword args in [1]
accounts, f = yaml_dump_mock.call_args[0]
self.assertEqual(handle, f)
- self.assertEqual(6, len(accounts))
+ self.assertEqual(5, len(accounts))
if self.domain_is_in:
self.assertIn('domain_name', accounts[0].keys())
else:
self.assertNotIn('domain_name', accounts[0].keys())
self.assertEqual(1, len([x for x in accounts if
x.get('types') == ['admin']]))
- self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
+ self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
for account in accounts:
self.assertIn('resources', account)
self.assertIn('network', account.get('resources'))
@@ -298,14 +314,14 @@
# Ordered args in [0], keyword args in [1]
accounts, f = yaml_dump_mock.call_args[0]
self.assertEqual(handle, f)
- self.assertEqual(6, len(accounts))
+ self.assertEqual(5, len(accounts))
if self.domain_is_in:
self.assertIn('domain_name', accounts[0].keys())
else:
self.assertNotIn('domain_name', accounts[0].keys())
self.assertEqual(1, len([x for x in accounts if
x.get('types') == ['admin']]))
- self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
+ self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
for account in accounts:
self.assertIn('resources', account)
self.assertIn('network', account.get('resources'))
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
new file mode 100644
index 0000000..1618df9
--- /dev/null
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -0,0 +1,42 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from tempest.cmd import cleanup
+from tempest.tests import base
+
+
+class TestTempestCleanup(base.TestCase):
+
+ def test_load_json(self):
+ # instantiate "empty" TempestCleanup
+ c = cleanup.TempestCleanup(None, None, 'test')
+ test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
+ # test if the file is loaded without any issues/exceptions
+ c._load_json(test_saved_json)
+
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup.init')
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup._cleanup')
+ def test_take_action_got_exception(self, mock_cleanup, mock_init):
+ c = cleanup.TempestCleanup(None, None, 'test')
+ c.GOT_EXCEPTIONS.append('exception')
+ mock_cleanup.return_value = True
+ mock_init.return_value = True
+ try:
+ c.take_action(mock.Mock())
+ except Exception as exc:
+ self.assertEqual(str(exc), '[\'exception\']')
+ return
+ assert False
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
new file mode 100644
index 0000000..7bf7315
--- /dev/null
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -0,0 +1,1778 @@
+# Copyright 2018 AT&T Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from oslo_serialization import jsonutils as json
+from tempest import clients
+from tempest.cmd import cleanup_service
+from tempest import config
+from tempest.lib import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib import fake_http
+
+
+class TestBaseService(base.TestCase):
+
+ class TestException(cleanup_service.BaseService):
+ def delete(self):
+ raise exceptions.NotImplemented
+
+ def dry_run(self):
+ raise exceptions.NotImplemented
+
+ def save_state(self):
+ raise exceptions.NotImplemented
+
+ def test_base_service_init(self):
+ kwargs = {'data': {'data': 'test'},
+ 'is_dry_run': False,
+ 'saved_state_json': {'saved': 'data'},
+ 'is_preserve': False,
+ 'is_save_state': True,
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
+ base = cleanup_service.BaseService(kwargs)
+ self.assertEqual(base.data, kwargs['data'])
+ self.assertFalse(base.is_dry_run)
+ self.assertEqual(base.saved_state_json, kwargs['saved_state_json'])
+ self.assertFalse(base.is_preserve)
+ self.assertTrue(base.is_save_state)
+ self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
+ self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
+
+ def test_not_implemented_ex(self):
+ kwargs = {'data': {'data': 'test'},
+ 'is_dry_run': False,
+ 'saved_state_json': {'saved': 'data'},
+ 'is_preserve': False,
+ 'is_save_state': False,
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
+ base = self.TestException(kwargs)
+ # delete
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 1)
+ # save_state
+ base.is_save_state = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 2)
+ # dry_run
+ base.is_dry_run = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 3)
+
+
+class MockFunctionsBase(base.TestCase):
+
+ def _create_response(self, body, status, headers):
+ if status:
+ if body:
+ body = json.dumps(body)
+ resp = fake_http.fake_http_response(headers, status=status), body
+ return resp
+ else:
+ return body
+
+ def _create_fixtures(self, fixtures_to_make):
+ mocked_fixtures = []
+ for fixture in fixtures_to_make:
+ func, body, status = fixture
+ mocked_response = self._create_response(body, status, None)
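+ # A body of 'error' (with no status) passes through _create_response
+ # unchanged and acts as a sentinel: patch the function to raise
+ # instead of returning a canned response.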
+ if mocked_response == 'error':
+ mocked_func = self.useFixture(fixtures.MockPatch(
+ func, side_effect=Exception("error")))
+ else:
+ mocked_func = self.useFixture(fixtures.MockPatch(
+ func, return_value=mocked_response))
+ mocked_fixtures.append(mocked_func)
+ return mocked_fixtures
+
+ def run_function_with_mocks(self, function_to_run, functions_to_mock):
+ """Mock a service client function for testing.
+
+ :param function_to_run: The service client function to call.
+ :param functions_to_mock: a list of tuples containing the function
+ to mock, the response body, and the response status.
+ EX:
+ ('tempest.lib.common.rest_client.RestClient.get',
+ {'users': ['']},
+ 200)
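+
+ Example call (illustrative only; 'serv' stands for any cleanup
+ service object)::
+
+ resp, mocks = self.run_function_with_mocks(
+ serv.run,
+ [('tempest.lib.common.rest_client.RestClient.get',
+ {'snapshots': []}, 200)])
+ mocks[0].mock.assert_called_once()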
+ """
+ mocked_fixtures = self._create_fixtures(functions_to_mock)
+ func_return = function_to_run()
+ return func_return, mocked_fixtures
+
+
+class BaseCmdServiceTests(MockFunctionsBase):
+
+ def setUp(self):
+ super(BaseCmdServiceTests, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+ self.useFixture(fixtures.MockPatch(
+ 'tempest.cmd.cleanup_service._get_network_id',
+ return_value=''))
+ cleanup_service.init_conf()
+ self.conf_values = {"flavors": cleanup_service.CONF_FLAVORS[0],
+ "images": cleanup_service.CONF_IMAGES[0],
+ "projects": cleanup_service.CONF_PROJECTS[0],
+ "users": cleanup_service.CONF_USERS[0],
+ "networks": cleanup_service.CONF_PUB_NETWORK,
+ "security_groups":
+ cleanup_service.CONF_PROJECTS[0],
+ "ports": cleanup_service.CONF_PUB_NETWORK,
+ "routers": cleanup_service.CONF_PUB_ROUTER,
+ "subnetpools": cleanup_service.CONF_PROJECTS[0],
+ }
+
+ saved_state = {
+ # Static data to ensure global service saved items are not deleted
+ "users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
+ "flavors": {u'42': u'm1.tiny'},
+ "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
+ "roles": {u'3efrt74r45hn': u'president'},
+ "projects": {u'f38ohgp93jj032': u'manhattan'},
+ "domains": {u'default': u'Default'},
+ # Static data to ensure project service saved items are not deleted
+ "snapshots": {u'1ad4c789-7e8w-4dwg-afc5': u'saved-snapshot'},
+ "servers": {u'7a6d4v7w-36ds-4216': u'saved-server'},
+ "server_groups": {u'as6d5f7g-46ca-475e': u'saved-server-group'},
+ "keypairs": {u'saved-key-pair': {
+ u'fingerprint': u'7e:eb:ab:24',
+ u'name': u'saved-key-pair'
+ }},
+ "volumes": {u'aa77asdf-1234': u'saved-volume'},
+ "networks": {u'6722fc13-4319': {
+ u'id': u'6722fc13-4319',
+ u'name': u'saved-network'
+ }},
+ "floatingips": {u'9e82d248-408a': {
+ u'id': u'9e82d248-408a',
+ u'status': u'ACTIVE'
+ }},
+ "routers": {u'4s5w34hj-id44': u'saved-router'},
+ "metering_label_rules": {u'93a973ce-4dc5': {
+ u'direction': u'ingress',
+ u'id': u'93a973ce-4dc5'
+ }},
+ "metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
+ "ports": {u'aa74aa4v-741a': u'saved-port'},
+ "security_groups": {u'7q844add-3697': u'saved-sec-group'},
+ "subnets": {u'55ttda4a-2584': u'saved-subnet'},
+ "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'},
+ "regions": {u'RegionOne': {}}
+ }
+ # Mocked methods
+ get_method = 'tempest.lib.common.rest_client.RestClient.get'
+ delete_method = 'tempest.lib.common.rest_client.RestClient.delete'
+ log_method = 'tempest.cmd.cleanup_service.LOG.exception'
+ # Override parameters
+ service_class = 'BaseService'
+ response = None
+ service_name = 'default'
+
+ def _create_cmd_service(self, service_type, is_save_state=False,
+ is_preserve=False, is_dry_run=False):
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ os = clients.Manager(creds)
+ return getattr(cleanup_service, service_type)(
+ os,
+ is_save_state=is_save_state,
+ is_preserve=is_preserve,
+ is_dry_run=is_dry_run,
+ project_id='b8e3ece07bb049138d224436756e3b57',
+ data={},
+ saved_state_json=self.saved_state
+ )
+
+ def _test_delete(self, mocked_fixture_tuple_list, fail=False):
+ serv = self._create_cmd_service(self.service_class)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list,
+ )
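+ # Sentinel return values identify the mocks: 'validate' marks the
+ # response validator (always called), 'exception' marks the
+ # LOG.exception mock, which must stay uncalled on a successful delete;
+ # for services tracked in saved_state, the saved ids must never
+ # appear in the delete call arguments.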
+ for fixture in fixtures:
+ if fixture.mock.return_value == 'validate':
+ fixture.mock.assert_called()
+ elif fail is False and fixture.mock.return_value == 'exception':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called_once()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ else:
+ fixture.mock.assert_called_once()
+ self.assertFalse(serv.data)
+
+ def _test_dry_run_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_dry_run=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
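+ # The 'delete' sentinel marks the delete mock, which a dry run must
+ # never call; saved-state ids must still be excluded from any calls.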
+ for fixture in fixtures:
+ if fixture.mock.return_value == 'delete':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called_once()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ else:
+ fixture.mock.assert_called_once()
+
+ def _test_saved_state_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_save_state=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
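+ # Every resource id returned by the mocked GET must be recorded in
+ # the service's saved-state data.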
+ for item in self.response[self.service_name]:
+ self.assertIn(item['id'],
+ serv.data[self.service_name])
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+
+ def _test_is_preserve_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_preserve=True)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.list,
+ mocked_fixture_tuple_list
+ )
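+ # list() must filter out anything configured in tempest.conf, so no
+ # returned id or name may match the CONF_* values collected in setUp.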
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+ self.assertIn(resp[0], self.response[self.service_name])
+ for rsp in resp:
+ self.assertNotIn(rsp['id'], self.conf_values.values())
+ self.assertNotIn(rsp['name'], self.conf_values.values())
+
+
+class TestSnapshotService(BaseCmdServiceTests):
+
+ service_class = 'SnapshotService'
+ service_name = 'snapshots'
+ response = {
+ "snapshots": [
+ {
+ "status": "available",
+ "metadata": {
+ "name": "test"
+ },
+ "name": "test-volume-snapshot",
+ "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
+ "created_at": "2015-11-29T02:25:51.000000",
+ "size": 1,
+ "updated_at": "2015-11-20T05:36:40.000000",
+ "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
+ "description": "volume snapshot"
+ },
+ {
+ "status": "available",
+ "name": "saved-snapshot",
+ "metadata": {},
+ "id": "1ad4c789-7e8w-4dwg-afc5",
+ "size": 1,
+ "volume_id": "af7c41be-1ff6-4233-a690-7ed61c34347f",
+ "created_at": "2015-11-20T05:39:40.000000",
+ "updated_at": "2015-11-20T05:39:40.000000",
+ "description": "snapshot in saved state"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestServerService(BaseCmdServiceTests):
+
+ service_class = 'ServerService'
+ service_name = 'servers'
+ response = {
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70-6ef0985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e7-6ef35",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ },
+ {
+ "id": "7a6d4v7w-36ds-4216",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70-6ef0985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e7-6ef35",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "saved-server"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestServerGroupService(BaseCmdServiceTests):
+
+ service_class = 'ServerGroupService'
+ service_name = 'server_groups'
+ validate_response = ('tempest.lib.services.compute.server_groups_client'
+ '.ServerGroupsClient.validate_response')
+
+ response = {
+ "server_groups": [
+ {
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "name": "test",
+ "policy": "anti-affinity",
+ "rules": {"max_server_per_host": 3},
+ "members": [],
+ "project_id": "6f70656e737461636b20342065766572",
+ "user_id": "fake"
+ },
+ {
+ "id": "as6d5f7g-46ca-475e",
+ "name": "saved-server-group"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None)
+ ])
+
+
+class TestKeyPairService(BaseCmdServiceTests):
+
+ service_class = 'KeyPairService'
+ service_name = 'keypairs'
+ validate_response = ('tempest.lib.services.compute.keypairs_client'
+ '.KeyPairsClient.validate_response')
+ response = {
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:bd",
+ "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF\n"
+ }
+ },
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24",
+ "name": "saved-key-pair"
+ }
+ }
+ ]
+ }
+
+ def _test_saved_state_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_save_state=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
+ for item in self.response[self.service_name]:
+ self.assertIn(item['keypair']['name'],
+ serv.data[self.service_name])
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([
+ (self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None)
+ ])
+
+
+class TestVolumeService(BaseCmdServiceTests):
+
+ service_class = 'VolumeService'
+ service_name = 'volumes'
+ response = {
+ "volumes": [
+ {
+ "id": "efa54464-8fab-47cd-a05a-be3e6b396188",
+ "links": [
+ {
+ "href": "http://127.0.0.1:37097/v3/89af/volumes/efa54",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:37097/89af/volumes/efa54464",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "volume-name"
+ },
+ {
+ "id": "aa77asdf-1234",
+ "name": "saved-volume"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestVolumeQuotaService(BaseCmdServiceTests):
+
+ service_class = 'VolumeQuotaService'
+ service_name = 'volume_quota_service'
+ response = {
+ "quota_set": {
+ "groups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "per_volume_gigabytes":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "volumes":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "backup_gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "snapshots":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "volumes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "snapshots_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "backups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "id": "b8e3ece07bb049138d224436756e3b57"
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 200),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNovaQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NovaQuotaService'
+ service_name = 'nova_quota_service'
+ response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 128,
+ "maxPersonality": 5,
+ "totalServerGroupsUsed": 0,
+ "maxImageMeta": 128,
+ "maxPersonalitySize": 10240,
+ "maxTotalKeypairs": 100,
+ "maxSecurityGroupRules": 20,
+ "maxServerGroups": 10,
+ "totalCoresUsed": 0,
+ "totalRAMUsed": 0,
+ "totalInstancesUsed": 0,
+ "maxSecurityGroups": 10,
+ "totalFloatingIpsUsed": 0,
+ "maxTotalCores": 20,
+ "maxServerGroupMembers": 10,
+ "maxTotalFloatingIps": 10,
+ "totalSecurityGroupsUsed": 0,
+ "maxTotalInstances": 10,
+ "maxTotalRAMSize": 51200
+ }
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNetworkQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NetworkQuotaService'
+ service_name = 'network_quota_service'
+ response = {
+ "quotas": [{
+ "subnet": 110,
+ "network": 100,
+ "floatingip": 50,
+ "tenant_id": "81e8490db559474dacb2212fca9cca2d",
+ "subnetpool": -1,
+ "security_group_rule": 100,
+ "trunk": -1,
+ "security_group": 10,
+ "router": 10,
+ "rbac_policy": 10, "project_id":
+ "81e8490db559474dacb2212fca9cca2d", "port": 500
+ }]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+# Begin network service classes
+class TestNetworkService(BaseCmdServiceTests):
+
+ service_class = 'NetworkService'
+ service_name = 'networks'
+ response = {
+ "networks": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "l2_adjacency": False,
+ "mtu": 1500,
+ "name": "net1",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": True,
+ "description": "",
+ "is_default": False
+ },
+ {
+ "id": "6722fc13-4319",
+ "name": "saved-network"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['networks'].append(
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2017-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": cleanup_service.CONF_PUB_NETWORK,
+ "name": "net2",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2018-03-08T20:19:41",
+ "vlan_transparent": True,
+ "is_default": False
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkFloatingIpService(BaseCmdServiceTests):
+
+ service_class = 'NetworkFloatingIpService'
+ service_name = 'floatingips'
+ response = {
+ "floatingips": [
+ {
+ "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
+ "description": "for test",
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myfip",
+ "created_at": "2016-12-21T10:55:50Z",
+ "updated_at": "2016-12-21T10:55:53Z",
+ "revision_number": 1,
+ "project_id": "4969c491a3c74ee4af974e6d800c62de",
+ "tenant_id": "4969c491a3c74ee4af974e6d800c62de",
+ "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
+ "fixed_ip_address": "10.0.0.3",
+ "floating_ip_address": "172.24.4.228",
+ "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
+ "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+ "status": "ACTIVE",
+ "port_details": {
+ "status": "ACTIVE",
+ "name": "",
+ "admin_state_up": True,
+ "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+ "device_owner": "compute:nova",
+ "mac_address": "fa:16:3e:b1:3b:30",
+ "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+ },
+ "tags": ["tag1,tag2"],
+ "port_forwardings": []
+ },
+ {
+ "id": "9e82d248-408a",
+ "status": "ACTIVE"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkRouterService(BaseCmdServiceTests):
+
+ service_class = 'NetworkRouterService'
+ service_name = 'routers'
+ validate_response = ('tempest.lib.services.network.routers_client'
+ '.RoutersClient.validate_response')
+ response = {
+ "routers": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
+ "external_gateway_info": {
+ "enable_snat": True,
+ "external_fixed_ips": [
+ {
+ "ip_address": "172.24.4.3",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
+ },
+ {
+ "ip_address": "2001:db8::c",
+ "subnet_id": "0c56df5d-ace5-46c8-8f4c-45fa4e334d18"
+ }
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
+ },
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "915a14a6-867b-4af7-83d1-70efceb146f9",
+ "name": "router2",
+ "revision_number": 1,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "tags": ["tag1,tag2"]
+ },
+ {
+ "id": "4s5w34hj-id44",
+ "name": "saved-router"
+ }
+ ],
+ # "ports" key is added to the response in order to simplify unit
+ # testing - it's because NetworkRouterService's delete method lists
+ # ports before deleting any router
+ "ports": []
+ }
+
+ def _test_delete(self, mocked_fixture_tuple_list, fail=False):
+ serv = self._create_cmd_service(self.service_class)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list,
+ )
+ for fixture in fixtures:
+ if fail is False and fixture.mock.return_value == 'exception':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ self.assertFalse(serv.data)
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['routers'].append(
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "id": cleanup_service.CONF_PUB_ROUTER,
+ "name": "router-preserve",
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "tags": ["tag1,tag2"]
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkMeteringLabelRuleService(BaseCmdServiceTests):
+
+ service_class = 'NetworkMeteringLabelRuleService'
+ service_name = 'metering_label_rules'
+ response = {
+ "metering_label_rules": [
+ {
+ "remote_ip_prefix": "20.0.0.0/24",
+ "direction": "ingress",
+ "metering_label_id": "e131d186-b02d-4c0b-83d5-0c0725c4f812",
+ "id": "9536641a-7d14-4dc5-afaf-93a973ce0eb8",
+ "excluded": False
+ },
+ {
+ "direction": "ingress",
+ "id": "93a973ce-4dc5"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkMeteringLabelService(BaseCmdServiceTests):
+
+ service_class = 'NetworkMeteringLabelService'
+ service_name = 'metering_labels'
+ response = {
+ "metering_labels": [
+ {
+ "project_id": "45345b0ee1ea477fac0f541b2cb79cd4",
+ "tenant_id": "45345b0ee1ea477fac0f541b2cb79cd4",
+ "description": "label1 description",
+ "name": "label1",
+ "id": "a6700594-5b7a-4105-8bfe-723b346ce866",
+ "shared": False
+ },
+ {
+ "name": "saved-label",
+ "id": "723b346ce866-4c7q",
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkPortService(BaseCmdServiceTests):
+
+ service_class = 'NetworkPortService'
+ service_name = 'ports'
+ response = {
+ "ports": [
+ {
+ "admin_state_up": True,
+ "allowed_address_pairs": [],
+ "created_at": "2016-03-08T20:19:41",
+ "description": "",
+ "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
+ "device_owner": "",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "172.24.4.2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 4,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "172.24.4.2",
+ "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062"
+ }
+ ],
+ "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:58:42:ed",
+ "name": "test_port",
+ "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
+ "project_id": "",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "port_security_enabled": False
+ },
+ {
+ "id": "aa74aa4v-741a",
+ "name": "saved-port",
+ "device_owner": ""
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['ports'].append(
+ {
+ "created_at": "2018-03-08T20:19:41",
+ "description": "",
+ "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
+ "device_owner": "compute:router_gateway",
+ "id": "d80b1a3b-4fc1-49f3-952e-1fdy1ws542",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:58:42:ed",
+ "name": "preserve_port",
+ "network_id": cleanup_service.CONF_PUB_NETWORK,
+ "project_id": "",
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "",
+ "updated_at": "2018-03-08T20:19:41",
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSecGroupService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSecGroupService'
+ service_name = 'security_groups'
+ response = {
+ "security_groups": [
+ {
+ "description": "default",
+ "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+ "name": "test",
+ "security_group_rules": [
+ {
+ "direction": "egress",
+ "ethertype": "IPv6",
+ "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+ "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
+ "project_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "revision_number": 1,
+ "tags": ["tag1,tag2"],
+ "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "created_at": "2018-03-19T19:16:56Z",
+ "updated_at": "2018-03-19T19:16:56Z",
+ "description": ""
+ }
+ ]
+ },
+ {
+ "id": "7q844add-3697",
+ "name": "saved-sec-group"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['security_groups'].append(
+ {
+ "description": "default",
+ "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+ "name": "test",
+ "security_group_rules": [
+ {
+ "direction": "egress",
+ "ethertype": "IPv6",
+ "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+ "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
+ "project_id": cleanup_service.CONF_PROJECTS[0],
+ "revision_number": 1,
+ "tags": ["tag1,tag2"],
+ "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "created_at": "2018-03-19T19:16:56Z",
+ "updated_at": "2018-03-19T19:16:56Z",
+ "description": ""
+ }
+ ]
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSubnetService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSubnetService'
+ service_name = 'subnets'
+ response = {
+ "subnets": [
+ {
+ "name": "private-subnet",
+ "enable_dhcp": True,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "allocation_pools": [
+ {
+ "start": "10.0.0.2",
+ "end": "10.0.0.254"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "revision_number": 2,
+ "service_types": [],
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ },
+ {
+ "id": "55ttda4a-2584",
+ "name": "saved-subnet"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['subnets'].append(
+ {
+ "name": "public-subnet",
+ "network_id": cleanup_service.CONF_PUB_NETWORK,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2018-10-10T14:35:34Z",
+ "service_types": [],
+ "tags": ["tag1,tag2"],
+ "updated_at": "2018-10-10T14:35:34Z"
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSubnetPoolsService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSubnetPoolsService'
+ service_name = 'subnetpools'
+ response = {
+ "subnetpools": [
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "03f761e6-eee0-43fc-a921-8acf64c14988",
+ "max_prefixlen": "64",
+ "name": "my-subnet-pool-ipv6",
+ "is_default": False,
+ "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "prefixes": [
+ "2001:db8:0:2::/64",
+ "2001:db8::/63"
+ ],
+ "ip_version": 6,
+ "shared": False,
+ "description": "",
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41",
+ "revision_number": 2,
+ "tags": ["tag1,tag2"]
+ },
+ {
+ "id": "8acf64c1-43fc",
+ "name": "saved-subnet-pool"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['subnetpools'].append(
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "9acf64c1-43fc",
+ "name": "preserve-pool",
+ "project_id": cleanup_service.CONF_PROJECTS[0],
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41"
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+# begin global services
+class TestRegionService(BaseCmdServiceTests):
+ service_class = 'RegionService'
+ service_name = 'regions'
+ response = {
+ "regions": [{
+ "parent_region_id": None,
+ "id": "RegionOne",
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions/RegionOne"
+ },
+ "description": ""
+ },
+ {
+ "parent_region_id": None,
+ "id": "RegionTwo",
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions/RegionTwo"
+ },
+ "description": ""
+ }],
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions",
+ "next": None,
+ "previous": None
+ }
+ }
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, "exception", None)]
+ self._test_delete(delete_mock)
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, "exception", None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestDomainService(BaseCmdServiceTests):
+
+ service_class = 'DomainService'
+ service_name = 'domains'
+ response = {
+ "domains": [
+ {
+ "description": "Destroy all humans",
+ "enabled": True,
+ "id": "5a75994a3",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/5a75994a3"
+ },
+ "name": "Sky_net"
+ },
+ {
+ "description": "Owns users and tenants on Identity API",
+ "enabled": False,
+ "id": "default",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/default"
+ },
+ "name": "Default"
+ }
+ ]
+ }
+
+ mock_update = ("tempest.lib.services.identity.v3."
+ "domains_client.DomainsClient.update_domain")
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None),
+ (self.mock_update, 'update', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None),
+ (self.mock_update, 'update', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestProjectsService(BaseCmdServiceTests):
+
+ service_class = 'ProjectService'
+ service_name = 'projects'
+ response = {
+ "projects": [
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "f38ohgp93jj032",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/f38ohgp93jj032"
+ },
+ "name": "manhattan",
+ "parent_id": None
+ },
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "098f89d3292ri4jf4",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/098f89d3292ri4jf4"
+ },
+ "name": "Apollo",
+ "parent_id": None
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['projects'].append(
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "r343q98h09f3092",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/r343q98h09f3092"
+ },
+ "name": cleanup_service.CONF_PROJECTS[0],
+ "parent_id": None
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestImagesService(BaseCmdServiceTests):
+
+ service_class = 'ImageService'
+ service_name = 'images'
+ response = {
+ "images": [
+ {
+ "status": "ACTIVE",
+ "name": "stratus-0.3.2-x86_64-disk",
+ "id": "34yhwr-4t3q",
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "34yhwr-4t3q",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ },
+ {
+ "status": "ACTIVE",
+ "name": "cirros-0.3.2-x86_64-disk",
+ "id": "1bea47ed-f6a9",
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "1bea47ed-f6a9",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['images'].append(
+ {
+ "status": "ACTIVE",
+ "name": "cirros-0.3.2-x86_64-disk",
+ "id": cleanup_service.CONF_IMAGES[0],
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "None",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestFlavorService(BaseCmdServiceTests):
+
+ service_class = 'FlavorService'
+ service_name = 'flavors'
+ response = {
+ "flavors": [
+ {
+ "disk": 1,
+ "id": "42",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/1",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/1",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ },
+ {
+ "disk": 2,
+ "id": "13",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/2",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/2",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['flavors'].append(
+ {
+ "disk": 3,
+ "id": cleanup_service.CONF_FLAVORS[0],
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/3",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/3",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestRoleService(BaseCmdServiceTests):
+
+ service_class = 'RoleService'
+ service_name = 'roles'
+ response = {
+ "roles": [
+ {
+ "domain_id": "FakeDomain",
+ "id": "3efrt74r45hn",
+ "name": "president",
+ "links": {
+ "self": "http://ex.com/identity/v3/roles/3efrt74r45hn"
+ }
+ },
+ {
+ "domain_id": 'FakeDomain',
+ "id": "39ruo5sdk040",
+ "name": "vice-p",
+ "links": {
+ "self": "http://ex.com/identity/v3/roles/39ruo5sdk040"
+ }
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestUserService(BaseCmdServiceTests):
+
+ service_class = 'UserService'
+ service_name = 'users'
+ response = {
+ "users": [
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "e812fb332456423fdv1b1320121qwe2",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "e812fb332456423fdv1b1320121qwe2",
+ },
+ "name": "Thunder",
+ "password_expires_at": "3102-11-06T15:32:17.000000",
+ },
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "32rwef64245tgr20121qw324bgg",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "32rwef64245tgr20121qw324bgg",
+ },
+ "name": "Lightning",
+ "password_expires_at": "1893-11-06T15:32:17.000000",
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['users'].append(
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "23ads5tg3rtrhe30121qwhyth",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "23ads5tg3rtrhe30121qwhyth",
+ },
+ "name": cleanup_service.CONF_USERS[0],
+ "password_expires_at": "1893-11-06T15:32:17.000000",
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 98ca12d..e9bbcc2 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -24,11 +24,20 @@
import six
from tempest.cmd import run
+from tempest.cmd import workspace
+from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.tests import base
+if six.PY2:
+ # Python 2 does not have the FileNotFoundError exception
+ FileNotFoundError = IOError
+
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
+CONF = config.CONF
+
class TestTempestRun(base.TestCase):
@@ -40,7 +49,7 @@
args = mock.Mock(spec=argparse.Namespace)
setattr(args, 'smoke', False)
setattr(args, 'regex', '')
- self.assertIsNone(None, self.run_cmd._build_regex(args))
+ self.assertIsNone(self.run_cmd._build_regex(args))
def test__build_regex_smoke(self):
args = mock.Mock(spec=argparse.Namespace)
@@ -55,6 +64,12 @@
self.assertEqual(['i_am_a_fun_little_regex'],
self.run_cmd._build_regex(args))
+ def test__build_regex_smoke_regex(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, "smoke", True)
+ setattr(args, 'regex', 'i_am_a_fun_little_regex')
+ self.assertEqual(['smoke'], self.run_cmd._build_regex(args))
+
class TestRunReturnCode(base.TestCase):
def setUp(self):
@@ -97,6 +112,27 @@
subprocess.call(['stestr', 'init'])
self.assertRunExit(['tempest', 'run', '--regex', 'passing'], 0)
+ def test_tempest_run_failing(self):
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_blackregex_failing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_passing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
+ def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
def test_tempest_run_fails(self):
self.assertRunExit(['tempest', 'run'], 1)
@@ -117,6 +153,15 @@
result = ["b\'" + x + "\'" for x in result]
self.assertEqual(result, tests)
+ def test_tempest_run_with_worker_file(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ worker_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(worker_file.close)
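+ # Rendered, the worker-file string below is the YAML:
+ # - worker:
+ #   - passing
+ #   concurrency: 3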
+ worker_file.write(
+ '- worker:\n - passing\n concurrency: 3'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--worker-file=%s' % path], 0)
+
def test_tempest_run_with_whitelist(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
@@ -125,7 +170,7 @@
whitelist_file.write('passing'.encode('utf-8'))
self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path], 0)
- def test_tempest_run_with_whitelist_with_regex(self):
+ def test_tempest_run_with_whitelist_regex_include_pass_check_fail(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
whitelist_file = os.fdopen(fd, 'wb', 0)
@@ -134,13 +179,132 @@
self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
'--regex', 'fail'], 1)
+ def test_tempest_run_with_whitelist_regex_include_pass_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ whitelist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(whitelist_file.close)
+ whitelist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ '--regex', 'passing'], 0)
+
+ def test_tempest_run_with_whitelist_regex_include_fail_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ whitelist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(whitelist_file.close)
+ whitelist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ '--regex', 'pass'], 1)
+
def test_tempest_run_passes_with_config_file(self):
self.assertRunExit(['tempest', 'run',
'--config-file', self.stestr_conf_file,
'--regex', 'passing'], 0)
+ def test_tempest_run_with_blacklist_failing(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 0)
+
+ def test_tempest_run_with_blacklist_passing(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 1)
+
+ def test_tempest_run_with_blacklist_regex_exclude_fail_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'pass'], 0)
+
+ def test_tempest_run_with_blacklist_regex_exclude_pass_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'pass'], 1)
+
+ def test_tempest_run_with_blacklist_regex_exclude_pass_check_fail(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'fail'], 1)
+
+
+class TestConfigPathCheck(base.TestCase):
+ def setUp(self):
+ super(TestConfigPathCheck, self).setUp()
+ self.run_cmd = run.TempestRun(None, None)
+
+ def test_tempest_run_set_config_path(self):
+ # Note: (mbindlish) This test was created for bug 1783751.
+ # It checks that TEMPEST_CONFIG_DIR and TEMPEST_CONFIG are actually
+ # set in the OS environment when a config path is passed in.
+
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+
+ self.run_cmd._set_env(path)
+ self.assertEqual(path, CONF._path)
+ self.assertIn('TEMPEST_CONFIG_DIR', os.environ)
+ self.assertEqual(path, os.path.join(os.environ['TEMPEST_CONFIG_DIR'],
+ os.environ['TEMPEST_CONFIG']))
+
+ def test_tempest_run_set_config_no_exist_path(self):
+ path = "fake/path"
+ self.assertRaisesRegex(FileNotFoundError,
+ 'Config file: .* doesn\'t exist',
+ self.run_cmd._set_env, path)
+
+ def test_tempest_run_no_config_path(self):
+ # Note: (mbindlish) This test was created for bug 1783751.
+ # It checks that TEMPEST_CONFIG_DIR and TEMPEST_CONFIG have no value
+ # in the OS environment when no config path is passed in.
+
+ self.run_cmd._set_env("")
+ self.assertFalse(CONF._path)
+ self.assertNotIn('TEMPEST_CONFIG_DIR', os.environ)
+ self.assertNotIn('TEMPEST_CONFIG', os.environ)
+
class TestTakeAction(base.TestCase):
+ def setUp(self):
+ super(TestTakeAction, self).setUp()
+ self.name = data_utils.rand_name('workspace')
+ self.path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.path, ignore_errors=True)
+ store_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, store_dir, ignore_errors=True)
+ self.store_file = os.path.join(store_dir, 'workspace.yaml')
+ self.workspace_manager = workspace.WorkspaceManager(
+ path=self.store_file)
+ self.workspace_manager.register_new_workspace(self.name, self.path)
+
+ def _setup_test_dirs(self):
+ self.directory = tempfile.mkdtemp(prefix='tempest-unit')
+ self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
+ self.test_dir = os.path.join(self.directory, 'tests')
+ os.mkdir(self.test_dir)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+
def test_workspace_not_registered(self):
class Exception_(Exception):
pass
@@ -169,24 +333,116 @@
self.assertIn(workspace, exit_msg)
def test_config_file_specified(self):
- # Setup test dirs
- self.directory = tempfile.mkdtemp(prefix='tempest-unit')
- self.addCleanup(shutil.rmtree, self.directory)
- self.test_dir = os.path.join(self.directory, 'tests')
- os.mkdir(self.test_dir)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
-
+ self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
parsed_args = mock.Mock()
parsed_args.workspace = None
parsed_args.state = None
parsed_args.list_tests = False
- parsed_args.config_file = '.stestr.conf'
+ parsed_args.config_file = path
with mock.patch('stestr.commands.run_command') as m:
m.return_value = 0
self.assertEqual(0, tempest_run.take_action(parsed_args))
m.assert_called()
+
+ def test_no_config_file_no_workspace_no_state(self):
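+        # Without a config file, a workspace or saved state there is
+        # nothing for 'tempest run' to work against, so take_action is
+        # expected to bail out with SystemExit.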
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+
+ parsed_args.workspace = None
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command'):
+ self.assertRaises(SystemExit, tempest_run.take_action, parsed_args)
+
+ def test_config_file_workspace_registered(self):
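+        # With both a config file and a registered workspace the run
+        # should be handed off to stestr and succeed.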
+ self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = path
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_workspace_registered_no_config_no_state(self, mock_init_state):
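+        # A registered workspace supplies the configuration, so the run
+        # should succeed without initializing any saved state.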
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_not_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_no_config_file_no_workspace_state_true(self, mock_init_state):
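+        # --state alone is not enough: with no config file and no
+        # workspace the command should exit without touching the state.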
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+
+ parsed_args.workspace = None
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command'):
+ self.assertRaises(SystemExit, tempest_run.take_action, parsed_args)
+ mock_init_state.assert_not_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_workspace_registered_no_config_state_true(self, mock_init_state):
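+        # With a registered workspace and --state set the run should
+        # succeed and the saved state should be initialized.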
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_no_workspace_config_file_state_true(self, mock_init_state):
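+        # An explicit config file combined with --state should also
+        # trigger state initialization, even without a workspace.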
+ self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = None
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = path
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_called()
diff --git a/tempest/tests/cmd/test_saved_state_json.json b/tempest/tests/cmd/test_saved_state_json.json
new file mode 100644
index 0000000..5c55331
--- /dev/null
+++ b/tempest/tests/cmd/test_saved_state_json.json
@@ -0,0 +1,16 @@
+{
+ "domains": {
+ "default": "Default"
+ },
+ "flavors": {
+ "1": "m1.tiny"
+ },
+ "images": {},
+ "projects": {
+ "268bcb63488b4aa2942ecaac0f85ed62": "demo"
+ },
+ "roles": {},
+ "users": {
+ "023e65a5922a454585a91c6af8310968": "demo"
+ }
+}
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 023703e..8dbba38 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -12,11 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
import fixtures
import mock
from oslo_serialization import jsonutils as json
from tempest import clients
+from tempest.cmd import init
from tempest.cmd import verify_tempest_config
from tempest.common import credentials_factory
from tempest import config
@@ -565,3 +568,64 @@
extensions_client = verify_tempest_config.get_extension_client(
os, service)
self.assertIsInstance(extensions_client, rest_client.RestClient)
+
+ def test_get_extension_client_sysexit(self):
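+        # Requesting the extension client for an unknown service should
+        # terminate with SystemExit rather than return a client.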
+ creds = credentials_factory.get_credentials(
+ fill_in=False, username='fake_user', project_name='fake_project',
+ password='fake_password')
+ os = clients.Manager(creds)
+ self.assertRaises(SystemExit,
+ verify_tempest_config.get_extension_client,
+ os, 'fakeservice')
+
+ def test_get_config_file(self):
+ conf_dir = os.path.join(os.getcwd(), 'etc/')
+ conf_file = "tempest.conf.sample"
+ local_sample_conf_file = os.path.join(conf_dir, conf_file)
+
+ def fake_environ_get(key, default=None):
+ if key == 'TEMPEST_CONFIG_DIR':
+ return conf_dir
+ elif key == 'TEMPEST_CONFIG':
+ return 'tempest.conf.sample'
+ return default
+
+ with mock.patch('os.environ.get', side_effect=fake_environ_get,
+ autospec=True):
+ init_cmd = init.TempestInit(None, None)
+ init_cmd.generate_sample_config(os.path.join(conf_dir, os.pardir))
+ self.assertTrue(os.path.isfile(local_sample_conf_file),
+ local_sample_conf_file)
+
+ file_pointer = verify_tempest_config._get_config_file()
+ self.assertEqual(local_sample_conf_file, file_pointer.name)
+
+ with open(local_sample_conf_file, 'r+') as f:
+ local_sample_conf_contents = f.read()
+ self.assertEqual(local_sample_conf_contents, file_pointer.read())
+
+ if file_pointer:
+ file_pointer.close()
+
+ def test_print_and_or_update_true(self):
+ with mock.patch.object(
+ verify_tempest_config, 'change_option') as test_mock:
+ verify_tempest_config.print_and_or_update(
+ 'fakeservice', 'fake-service-available', False, True)
+ test_mock.assert_called_once_with(
+ 'fakeservice', 'fake-service-available', False)
+
+ def test_print_and_or_update_false(self):
+ with mock.patch.object(
+ verify_tempest_config, 'change_option') as test_mock:
+ verify_tempest_config.print_and_or_update(
+ 'fakeservice', 'fake-service-available', False, False)
+ test_mock.assert_not_called()
+
+ def test_contains_version_positive_data(self):
+ self.assertTrue(
+ verify_tempest_config.contains_version('v1.', ['v1.0', 'v2.0']))
+
+ def test_contains_version_negative_data(self):
+ self.assertFalse(
+ verify_tempest_config.contains_version('v5.', ['v1.0', 'v2.0']))
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index 3ed8a10..7a6b576 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -48,7 +48,7 @@
stdout, stderr = process.communicate()
return_code = process.returncode
msg = ("%s failed with:\nstdout: %s\nstderr: %s" % (' '.join(cmd),
- stdout, stderr))
+ stdout, stderr))
self.assertEqual(return_code, expected, msg)
def test_run_workspace_list(self):
@@ -122,22 +122,157 @@
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+ def test_workspace_manager_rename_no_name_exist(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ self.name, no_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty name is specified."
+ " Please specify correct name for workspace.\n")
+
+ def test_workspace_manager_rename_with_existing_name(self):
+ new_name = self.name
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ self.name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace already exists with name: %s.\n"
+ % new_name)
+
+ def test_workspace_manager_rename_no_exist_old_name(self):
+ old_name = ""
+ new_name = data_utils.rand_uuid()
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ old_name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % old_name)
+
+ def test_workspace_manager_rename_integer_data(self):
+ old_name = self.name
+ new_name = 12345
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
+ def test_workspace_manager_rename_alphanumeric_data(self):
+ old_name = self.name
+ new_name = 'abc123'
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
def test_workspace_manager_move(self):
new_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
self.workspace_manager.move_workspace(self.name, new_path)
self.assertEqual(
self.workspace_manager.get_workspace(self.name), new_path)
+        # NOTE(mbindlish): Also check that the workspace no longer
+        # points at the old path
+ self.assertNotEqual(
+ self.workspace_manager.get_workspace(self.name), self.path)
+
+ def test_workspace_manager_move_wrong_path(self):
+ new_path = 'wrong/path'
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ self.name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "Path does not exist.\n")
+
+ def test_workspace_manager_move_wrong_workspace(self):
+ workspace_name = "wrong_workspace_name"
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
+
+ def test_workspace_manager_move_no_workspace_name(self):
+ workspace_name = ""
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
+
+ def test_workspace_manager_move_no_workspace_path(self):
+ new_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ self.name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
def test_workspace_manager_remove_entry(self):
self.workspace_manager.remove_workspace_entry(self.name)
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
+ def test_workspace_manager_remove_entry_no_name(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ no_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % no_name)
+
+ def test_workspace_manager_remove_entry_wrong_name(self):
+ wrong_name = "wrong_name"
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ wrong_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % wrong_name)
+
def test_workspace_manager_remove_directory(self):
path = self.workspace_manager.remove_workspace_entry(self.name)
self.workspace_manager.remove_workspace_directory(path)
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
+ def test_workspace_manager_remove_directory_no_path(self):
+ no_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_directory,
+ no_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
+
def test_path_expansion(self):
name = data_utils.rand_uuid()
path = os.path.join("~", name)
@@ -154,8 +289,11 @@
nonexistent_name)
self.assertEqual(1, ex.code)
self.assertEqual(mock_stdout.getvalue(),
- "A workspace was not found with name: %s\n" %
- nonexistent_name)
+ "A workspace was not found with name: %s\n"
+ % nonexistent_name)
+
+ def test_workspace_name_exists(self):
+ self.assertIsNone(self.workspace_manager._name_exists(self.name))
def test_workspace_name_already_exists(self):
duplicate_name = self.name
@@ -169,6 +307,11 @@
"A workspace already exists with name: %s.\n"
% duplicate_name)
+ def test_workspace_name_exists_check_new_name(self):
+ new_name = "fake_name"
+ self.assertIsNone(self.workspace_manager.
+ _workspace_name_exists(new_name))
+
def test_workspace_manager_path_not_exist(self):
fake_path = "fake_path"
with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
@@ -179,8 +322,55 @@
self.assertEqual(mock_stdout.getvalue(),
"Path does not exist.\n")
+ def test_validate_path_exists(self):
+ new_path = self.path
+ self.assertIsNone(self.workspace_manager.
+ _validate_path(new_path))
+
def test_workspace_manager_list_workspaces(self):
listed = self.workspace_manager.list_workspaces()
self.assertEqual(1, len(listed))
self.assertIn(self.name, listed)
self.assertEqual(self.path, listed.get(self.name))
+
+ def test_register_new_workspace_no_name(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ register_new_workspace,
+ no_name, self.path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty name is specified."
+ " Please specify correct name for workspace.\n")
+
+ def test_register_new_workspace_no_path(self):
+ no_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ register_new_workspace,
+ self.name, no_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
+
+ def test_register_new_workspace_integer_data(self):
+ workspace_name = 12345
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
+
+ def test_register_new_workspace_alphanumeric_data(self):
+ workspace_name = 'abc123'
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
old mode 100644
new mode 100755
index 938d226..e3bb836
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -15,6 +15,7 @@
import time
import mock
+from oslo_utils.fixture import uuidsentinel as uuids
from tempest.common import waiters
from tempest import exceptions
@@ -54,25 +55,6 @@
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
- @mock.patch.object(time, 'sleep')
- def test_wait_for_volume_status_error_restoring(self, mock_sleep):
- # Tests that the wait method raises VolumeRestoreErrorException if
- # the volume status is 'error_restoring'.
- client = mock.Mock(spec=volumes_client.VolumesClient,
- resource_type="volume",
- build_interval=1)
- volume1 = {'volume': {'status': 'restoring-backup'}}
- volume2 = {'volume': {'status': 'error_restoring'}}
- mock_show = mock.Mock(side_effect=(volume1, volume2))
- client.show_volume = mock_show
- volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
- self.assertRaises(exceptions.VolumeRestoreErrorException,
- waiters.wait_for_volume_resource_status,
- client, volume_id, 'available')
- mock_show.assert_has_calls([mock.call(volume_id),
- mock.call(volume_id)])
- mock_sleep.assert_called_once_with(1)
-
class TestInterfaceWaiters(base.TestCase):
@@ -148,3 +130,154 @@
list_interfaces.assert_has_calls([mock.call('server_id'),
mock.call('server_id')])
sleep.assert_called_once_with(client.build_interval)
+
+
+class TestVolumeWaiters(base.TestCase):
+ vol_migrating_src_host = {
+ 'volume': {'migration_status': 'migrating',
+ 'os-vol-host-attr:host': 'src_host@backend#type'}}
+ vol_migrating_dst_host = {
+ 'volume': {'migration_status': 'migrating',
+ 'os-vol-host-attr:host': 'dst_host@backend#type'}}
+ vol_migration_success = {
+ 'volume': {'migration_status': 'success',
+ 'os-vol-host-attr:host': 'dst_host@backend#type'}}
+ vol_migration_error = {
+ 'volume': {'migration_status': 'error',
+ 'os-vol-host-attr:host': 'src_host@backend#type'}}
+
+ def test_wait_for_volume_migration_timeout(self):
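+        # The volume never leaves 'migrating', so once build_timeout is
+        # exceeded the waiter should raise TimeoutException.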
+ show_volume = mock.MagicMock(return_value=self.vol_migrating_src_host)
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_migration,
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ def test_wait_for_volume_migration_error(self):
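+        # A migration_status of 'error' should be reported as a
+        # TempestException instead of waiting for the timeout.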
+ show_volume = mock.MagicMock(side_effect=[
+ self.vol_migrating_src_host,
+ self.vol_migrating_src_host,
+ self.vol_migration_error])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', return_value=0.)
+ self.patch('time.sleep')
+ self.assertRaises(lib_exc.TempestException,
+ waiters.wait_for_volume_migration,
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ def test_wait_for_volume_migration_success_and_dst(self):
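+        # The waiter should keep polling while the volume is migrating
+        # and return once it reports success on the destination host.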
+ show_volume = mock.MagicMock(side_effect=[
+ self.vol_migrating_src_host,
+ self.vol_migrating_dst_host,
+ self.vol_migration_success])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', return_value=0.)
+ self.patch('time.sleep')
+ waiters.wait_for_volume_migration(
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ # Assert that we wait until migration_status is success and dst_host is
+ # part of the returned os-vol-host-attr:host.
+ show_volume.assert_has_calls([mock.call(mock.sentinel.volume_id),
+ mock.call(mock.sentinel.volume_id),
+ mock.call(mock.sentinel.volume_id)])
+
+ @mock.patch.object(time, 'sleep')
+ def test_wait_for_volume_status_error_restoring(self, mock_sleep):
+ # Tests that the wait method raises VolumeRestoreErrorException if
+ # the volume status is 'error_restoring'.
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1)
+ volume1 = {'volume': {'status': 'restoring-backup'}}
+ volume2 = {'volume': {'status': 'error_restoring'}}
+ mock_show = mock.Mock(side_effect=(volume1, volume2))
+ client.show_volume = mock_show
+ volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+ self.assertRaises(exceptions.VolumeRestoreErrorException,
+ waiters.wait_for_volume_resource_status,
+ client, volume_id, 'available')
+ mock_show.assert_has_calls([mock.call(volume_id),
+ mock.call(volume_id)])
+ mock_sleep.assert_called_once_with(1)
+
+ @mock.patch.object(time, 'sleep')
+ def test_wait_for_volume_status_error_extending(self, mock_sleep):
+ # Tests that the wait method raises VolumeExtendErrorException if
+ # the volume status is 'error_extending'.
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1)
+ volume1 = {'volume': {'status': 'extending'}}
+ volume2 = {'volume': {'status': 'error_extending'}}
+ mock_show = mock.Mock(side_effect=(volume1, volume2))
+ client.show_volume = mock_show
+ volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+ self.assertRaises(exceptions.VolumeExtendErrorException,
+ waiters.wait_for_volume_resource_status,
+ client, volume_id, 'available')
+ mock_show.assert_has_calls([mock.call(volume_id),
+ mock.call(volume_id)])
+ mock_sleep.assert_called_once_with(1)
+
+ def test_wait_for_volume_attachment(self):
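+        # The attachment is reported twice before it disappears; the
+        # waiter should poll until the attachment list is empty.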
+ vol_detached = {'volume': {'attachments': []}}
+ vol_attached = {'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}}
+ show_volume = mock.MagicMock(side_effect=[
+ vol_attached, vol_attached, vol_detached])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=5,
+ show_volume=show_volume)
+ self.patch('time.time')
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is called until the attachment is removed.
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
+
+ def test_wait_for_volume_attachment_timeout(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ # Assert that a timeout is raised if the attachment remains.
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove,
+ client, uuids.volume_id, uuids.attachment_id)
+
+ def test_wait_for_volume_attachment_not_present(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': []}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is only called once before we return
+ show_volume.assert_called_once_with(uuids.volume_id)
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 1f0080f..937f93a 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -88,7 +88,7 @@
# the information using gnu/linux tools.
def _assert_exec_called_with(self, cmd):
- cmd = "set -eu -o pipefail; PATH=$PATH:/sbin; " + cmd
+ cmd = "set -eu -o pipefail; PATH=$PATH:/sbin:/usr/sbin; " + cmd
self.ssh_mock.mock.exec_command.assert_called_with(cmd)
def test_get_disks(self):
@@ -106,6 +106,16 @@
self.assertEqual(self.conn.get_disks(), result)
self._assert_exec_called_with('lsblk -lb --nodeps')
+ def test_list_disks(self):
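+        # list_disks should return the names of the 'disk' entries from
+        # the lsblk output (sda, sdb) and skip the rom device.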
+ output_lsblk = """\
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sda 8:0 0 128035676160 0 disk
+sdb 8:16 0 1000204886016 0 disk
+sr0 11:0 1 1073741312 0 rom"""
+ disk_list = ['sda', 'sdb']
+ self.ssh_mock.mock.exec_command.return_value = output_lsblk
+ self.assertEqual(self.conn.list_disks(), disk_list)
+
def test_get_boot_time(self):
booted_at = 10000
uptime_sec = 5000.02
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index be54130..25e99d5 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -32,6 +32,7 @@
super(ConfigFixture, self).setUp()
self.conf.set_default('build_interval', 10, group='compute')
self.conf.set_default('build_timeout', 10, group='compute')
+ self.conf.set_default('image_ref', 'fake_image_id', group='compute')
self.conf.set_default('disable_ssl_certificate_validation', True,
group='identity')
self.conf.set_default('uri', 'http://fake_uri.com/auth',
diff --git a/tempest/tests/files/setup.cfg b/tempest/tests/files/setup.cfg
index bd68708..a81d31e 100644
--- a/tempest/tests/files/setup.cfg
+++ b/tempest/tests/files/setup.cfg
@@ -3,7 +3,7 @@
version = 1
summary = Fake Project for testing wrapper scripts
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
classifier =
Intended Audience :: Information Technology
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index ebcf5d1..4723458 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -109,8 +109,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': id, 'name': name},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '2', 'name': 'Member'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_2_roles(self):
@@ -120,8 +120,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': '1234', 'name': 'role1'},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '12345', 'name': 'role2'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '12345', 'name': 'role2'}]}))))
return roles_fix
def _mock_assign_user_role(self):
diff --git a/tempest/tests/lib/common/test_http.py b/tempest/tests/lib/common/test_http.py
index 02436e0..a19153f 100644
--- a/tempest/tests/lib/common/test_http.py
+++ b/tempest/tests/lib/common/test_http.py
@@ -167,3 +167,24 @@
'%s://%s:%i' % (proxy.scheme,
proxy.host,
proxy.port))
+
+
+class TestClosingHttpRedirects(base.TestCase):
+ def test_redirect_default(self):
+ connection = http.ClosingHttp()
+ self.assertTrue(connection.follow_redirects)
+
+ def test_redirect_off(self):
+ connection = http.ClosingHttp(follow_redirects=False)
+ self.assertFalse(connection.follow_redirects)
+
+
+class TestClosingProxyHttpRedirects(base.TestCase):
+ def test_redirect_default(self):
+ connection = http.ClosingProxyHttp(proxy_url=PROXY_URL)
+ self.assertTrue(connection.follow_redirects)
+
+ def test_redirect_off(self):
+ connection = http.ClosingProxyHttp(follow_redirects=False,
+ proxy_url=PROXY_URL)
+ self.assertFalse(connection.follow_redirects)
diff --git a/tempest/tests/lib/common/test_profiler.py b/tempest/tests/lib/common/test_profiler.py
new file mode 100644
index 0000000..59fa0364
--- /dev/null
+++ b/tempest/tests/lib/common/test_profiler.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import testtools
+
+from tempest.lib.common import profiler
+
+
+class TestProfiler(testtools.TestCase):
+
+ def test_serialize(self):
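+        # With a fixed trace payload the serialized headers should carry
+        # the HMAC signature and the base64-encoded trace information.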
+ key = 'SECRET_KEY'
+ pm = {'key': key, 'uuid': 'ID'}
+
+ with mock.patch('tempest.lib.common.profiler._profiler', pm):
+ with mock.patch('json.dumps') as jdm:
+ jdm.return_value = '{"base_id": "ID", "parent_id": "ID"}'
+
+ expected = {
+ 'X-Trace-HMAC':
+ '887292df9f13b8b5ecd6bbbd2e16bfaaa4d914b0',
+ 'X-Trace-Info':
+ b'eyJiYXNlX2lkIjogIklEIiwgInBhcmVudF9pZCI6ICJJRCJ9'
+ }
+
+ self.assertEqual(expected,
+ profiler.serialize_as_http_headers())
+
+ def test_profiler_lifecycle(self):
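+        # enable() should record the key and trace id, and disable()
+        # should reset the module-level profiler state to an empty dict.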
+ key = 'SECRET_KEY'
+ uuid = 'ID'
+
+ self.assertEqual({}, profiler._profiler)
+
+ profiler.enable(key, uuid)
+ self.assertEqual({'key': key, 'uuid': uuid}, profiler._profiler)
+
+ profiler.disable()
+ self.assertEqual({}, profiler._profiler)
+
+ @mock.patch('oslo_utils.uuidutils.generate_uuid')
+ def test_profiler_lifecycle_generate_trace_id(self, generate_uuid_mock):
+ key = 'SECRET_KEY'
+ uuid = 'ID'
+ generate_uuid_mock.return_value = uuid
+
+ self.assertEqual({}, profiler._profiler)
+
+ profiler.enable(key)
+ self.assertEqual({'key': key, 'uuid': uuid}, profiler._profiler)
+
+ profiler.disable()
+ self.assertEqual({}, profiler._profiler)
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index 4c0bb57..b861582 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -13,10 +13,10 @@
# under the License.
import copy
-import json
import fixtures
import jsonschema
+from oslo_serialization import jsonutils as json
import six
from tempest.lib.common import http
diff --git a/tempest/tests/lib/fake_identity.py b/tempest/tests/lib/fake_identity.py
index 8bae34f..9d7b0fd 100644
--- a/tempest/tests/lib/fake_identity.py
+++ b/tempest/tests/lib/fake_identity.py
@@ -192,7 +192,7 @@
def _fake_v3_response(self, uri, method="GET", body=None, headers=None,
- redirections=5, connection_type=None):
+ redirections=5, connection_type=None, log_req_body=None):
fake_headers = {
"x-subject-token": TOKEN
}
@@ -202,7 +202,7 @@
def _fake_v3_response_domain_scope(self, uri, method="GET", body=None,
headers=None, redirections=5,
- connection_type=None):
+ connection_type=None, log_req_body=None):
fake_headers = {
"status": "201",
"x-subject-token": TOKEN
@@ -213,7 +213,7 @@
def _fake_v3_response_no_scope(self, uri, method="GET", body=None,
headers=None, redirections=5,
- connection_type=None):
+ connection_type=None, log_req_body=None):
fake_headers = {
"status": "201",
"x-subject-token": TOKEN
@@ -223,7 +223,7 @@
def _fake_v2_response(self, uri, method="GET", body=None, headers=None,
- redirections=5, connection_type=None):
+ redirections=5, connection_type=None, log_req_body=None):
return (fake_http.fake_http_response({}, status=200),
json.dumps(IDENTITY_V2_RESPONSE))
diff --git a/tempest/tests/lib/services/compute/test_images_client.py b/tempest/tests/lib/services/compute/test_images_client.py
index c2c3b76..d1500e5 100644
--- a/tempest/tests/lib/services/compute/test_images_client.py
+++ b/tempest/tests/lib/services/compute/test_images_client.py
@@ -186,15 +186,19 @@
def _test_resource_deleted(self, bytes_body=False):
params = {"id": self.FAKE_IMAGE_ID}
expected_op = self.FAKE_IMAGE_DATA['show']
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- side_effect=lib_exc.NotFound))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ side_effect=lib_exc.NotFound))
self.assertEqual(True, self.client.is_resource_deleted(**params))
tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
tempdata['image']['id'] = None
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- return_value=expected_op))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ return_value=expected_op))
self.assertEqual(False, self.client.is_resource_deleted(**params))
def test_list_images_with_str_body(self):
diff --git a/tempest/tests/lib/services/identity/v2/test_token_client.py b/tempest/tests/lib/services/identity/v2/test_token_client.py
index dfce9b3..5b4e210 100644
--- a/tempest/tests/lib/services/identity/v2/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v2/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
@@ -87,6 +86,9 @@
with mock.patch.object(token_client_v2, 'raw_request') as mock_raw_r:
mock_raw_r.return_value = response, body
resp, body = token_client_v2.request('GET', 'fake_uri')
+ mock_raw_r.assert_called_once_with('fake_uri', 'GET',
+ headers=mock.ANY, body=None,
+ log_req_body='<omitted>')
self.assertIsInstance(body, dict)
def test_request_with_bytes_body(self):
diff --git a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
index 9bf9b68..8aed7d7 100644
--- a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
@@ -20,78 +20,116 @@
class TestApplicationCredentialsClient(base.BaseServiceTest):
FAKE_CREATE_APP_CRED = {
"application_credential": {
- "description": "fake application credential",
+ "name": "monitoring",
+ "secret": "rEaqvJka48mpv",
+ "description": "Application credential for monitoring.",
+ "expires_at": "2018-02-27T18:30:59Z",
"roles": [
+ {"name": "Reader"}
+ ],
+ "access_rules": [
{
- "id": "c60fdd45",
- "domain_id": None,
- "name": "Member"
+ "path": "/v2.0/metrics",
+ "method": "GET",
+ "service": "monitoring"
}
],
- "expires_at": "2019-02-27T18:30:59.999999Z",
- "secret": "_BVq0xU5L",
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "unrestricted": False
}
}
FAKE_LIST_APP_CREDS = {
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/application_credentials",
+ "previous": None,
+ "next": None
+ },
"application_credentials": [
{
- "description": "fake application credential",
+ "description": "Application credential for backups.",
"roles": [
{
"domain_id": None,
- "name": "Member",
- "id": "c60fdd45",
+ "name": "Writer",
+ "id": "6aff702516544aeca22817fd3bc39683"
}
],
- "expires_at": "2018-02-27T18:30:59.999999Z",
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "access_rules": [
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "308a7e905eee4071aac5971744c061f6"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "308a7e905eee4071aac5971744c061f6",
+ "name": "backups"
},
{
- "description": None,
+ "description": "Application credential for monitoring.",
"roles": [
{
- "id": "0f1837c8",
+ "id": "6aff702516544aeca22817fd3bc39683",
"domain_id": None,
- "name": "anotherrole"
- },
- {
- "id": "c60fdd45",
- "domain_id": None,
- "name": "Member"
+ "name": "Reader"
}
],
- "expires_at": None,
- "unrestricted": None,
- "project_id": "c5403d938",
- "id": "d441c904f",
- "name": "two"
+ "access_rules": [
+ {
+ "path": "/v2.0/metrics",
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "58d61ff8e6e34accb35874016d1dba8b"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "58d61ff8e6e34accb35874016d1dba8b",
+ "name": "monitoring"
}
]
}
FAKE_APP_CRED_INFO = {
"application_credential": {
- "description": None,
+ "description": "Application credential for monitoring.",
"roles": [
{
+ "id": "6aff702516544aeca22817fd3bc39683",
"domain_id": None,
- "name": "Member",
- "id": "c60fdd45",
+ "name": "Reader"
}
],
- "expires_at": None,
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "access_rules": [
+ {
+ "path": "/v2.0/metrics",
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "58d61ff8e6e34accb35874016d1dba8b"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "58d61ff8e6e34accb35874016d1dba8b",
+ "name": "monitoring"
}
}
@@ -118,7 +156,7 @@
self.FAKE_APP_CRED_INFO,
bytes_body,
user_id="123456",
- application_credential_id="5499a186")
+ application_credential_id="58d61ff8e6e34accb35874016d1dba8b")
def _test_list_app_creds(self, bytes_body=False):
self.check_service_client_function(
@@ -146,11 +184,11 @@
def test_list_application_credential_with_bytes_body(self):
self._test_list_app_creds(bytes_body=True)
- def test_delete_trust(self):
+ def test_delete_application_credential(self):
self.check_service_client_function(
self.client.delete_application_credential,
'tempest.lib.common.rest_client.RestClient.delete',
{},
user_id="123456",
- application_credential_id="5499a186",
+ application_credential_id="58d61ff8e6e34accb35874016d1dba8b",
status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_groups_client.py b/tempest/tests/lib/services/identity/v3/test_groups_client.py
index 38cf3ae..e3c9851 100644
--- a/tempest/tests/lib/services/identity/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_groups_client.py
@@ -211,3 +211,13 @@
group_id='6e13e2068cf9466e98950595baf6bb35',
user_id='642688fa65a84217b86cef3c063de2b9',
)
+
+ def test_delete_group_user(self):
+ self.check_service_client_function(
+ self.client.delete_group_user,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=204,
+ group_id='6e13e2068cf9466e98950595baf6bb35',
+ user_id='642688fa65a84217b86cef3c063de2b9',
+ )
diff --git a/tempest/tests/lib/services/identity/v3/test_projects_client.py b/tempest/tests/lib/services/identity/v3/test_projects_client.py
index 6ffbcde..d26de06 100644
--- a/tempest/tests/lib/services/identity/v3/test_projects_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_projects_client.py
@@ -62,7 +62,8 @@
"/0c4e939acacf4376bdcd1129f1a054ad"
},
"name": "admin",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -75,7 +76,8 @@
"/0cbd49cbf76d405d9c86562e1d579bd3"
},
"name": "demo",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -88,7 +90,8 @@
"/2db68fed84324f29bb73130c6c2094fb"
},
"name": "swifttenanttest2",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -101,7 +104,8 @@
"/3d594eb0f04741069dbbb521635b21c7"
},
"name": "service",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
}
]
}
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index 38e8c4a..656e10a 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
@@ -137,6 +136,9 @@
mock_raw_r.return_value = (
fake_identity._fake_v3_response(None, None))
resp, body = token_client_v3.request('GET', 'fake_uri')
+ mock_raw_r.assert_called_once_with('fake_uri', 'GET',
+ headers=mock.ANY, body=None,
+ log_req_body='<omitted>')
self.assertIsInstance(body, dict)
diff --git a/tempest/tests/lib/services/identity/v3/test_users_client.py b/tempest/tests/lib/services/identity/v3/test_users_client.py
index 5b572f5..c0dfdae 100644
--- a/tempest/tests/lib/services/identity/v3/test_users_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_users_client.py
@@ -25,6 +25,11 @@
'enabled': True,
'name': 'Tempest User',
'password': 'TempestPassword',
+ "description": "Tempest User",
+ "email": "TempestUser@example.com",
+ "options": {
+ "ignore_password_expiry": True
+ }
}
}
@@ -104,6 +109,38 @@
]
}
+ FAKE_PROJECT_LIST = {
+ "links": {
+ "self": "http://example.com/identity/v3/users/313233/projects",
+ "previous": None,
+ "next": None
+ },
+ "projects": [
+ {
+ "description": "description of this project",
+ "domain_id": "161718",
+ "enabled": True,
+ "id": "456788",
+ "links": {
+ "self": "http://example.com/identity/v3/projects/456788"
+ },
+ "name": "a project name",
+ "parent_id": "212223"
+ },
+ {
+ "description": "description of this project",
+ "domain_id": "161718",
+ "enabled": True,
+ "id": "456789",
+ "links": {
+ "self": "http://example.com/identity/v3/projects/456789"
+ },
+ "name": "another domain",
+ "parent_id": "212223"
+ },
+ ]
+ }
+
def setUp(self):
super(TestUsersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -155,6 +192,15 @@
user_id='817fb3c23fd7465ba6d7fe1b1320121d',
)
+ def _test_list_user_projects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_user_projects,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_PROJECT_LIST,
+ bytes_body,
+ user_id='817fb3c23fd7465ba6d7fe1b1320121d',
+ )
+
def test_create_user_with_string_body(self):
self._test_create_user()
@@ -185,6 +231,12 @@
def test_list_user_groups_with_bytes_body(self):
self._test_list_user_groups(bytes_body=True)
+ def test_list_user_projects_with_string_body(self):
+ self._test_list_user_projects()
+
+ def test_list_user_projects_with_bytes_body(self):
+ self._test_list_user_projects(bytes_body=True)
+
def test_delete_user(self):
self.check_service_client_function(
self.client.delete_user,
diff --git a/tempest/tests/lib/services/image/v2/test_image_members_client.py b/tempest/tests/lib/services/image/v2/test_image_members_client.py
index 703b6e1..2caa567 100644
--- a/tempest/tests/lib/services/image/v2/test_image_members_client.py
+++ b/tempest/tests/lib/services/image/v2/test_image_members_client.py
@@ -27,6 +27,28 @@
"schema": "/v2/schemas/member"
}
+ FAKE_LIST_IMAGE_MEMBERS = {
+ "members": [
+ {
+ "created_at": "2013-10-07T17:58:03Z",
+ "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe",
+ "member_id": "123456789",
+ "schema": "/v2/schemas/member",
+ "status": "pending",
+ "updated_at": "2013-10-07T17:58:03Z"
+ },
+ {
+ "created_at": "2013-10-07T17:58:55Z",
+ "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe",
+ "member_id": "987654321",
+ "schema": "/v2/schemas/member",
+ "status": "accepted",
+ "updated_at": "2013-10-08T12:08:55Z"
+ }
+ ],
+ "schema": "/v2/schemas/members"
+ }
+
def setUp(self):
super(TestImageMembersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -34,6 +56,14 @@
'image',
'regionOne')
+ def _test_list_image_members(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_image_members,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_IMAGE_MEMBERS,
+ bytes_body,
+ image_id="dbc999e3-c52f-4200-bedd-3b18fe7f87fe")
+
def _test_show_image_member(self, bytes_body=False):
self.check_service_client_function(
self.client.show_image_member,
@@ -62,6 +92,12 @@
member_id="8989447062e04a818baf9e073fd04fa7",
schema="/v2/schemas/member2")
+ def test_list_image_members_with_str_body(self):
+ self._test_list_image_members()
+
+ def test_list_image_members_with_bytes_body(self):
+ self._test_list_image_members(bytes_body=True)
+
def test_show_image_member_with_str_body(self):
self._test_show_image_member()
diff --git a/tempest/tests/lib/services/image/v2/test_images_client.py b/tempest/tests/lib/services/image/v2/test_images_client.py
index ee4d4cb..fe671bd 100644
--- a/tempest/tests/lib/services/image/v2/test_images_client.py
+++ b/tempest/tests/lib/services/image/v2/test_images_client.py
@@ -35,14 +35,19 @@
"created_at": "2012-08-10T19:23:50Z",
"updated_at": "2012-08-12T11:11:33Z",
"self": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea",
- "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file",
+ "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927"
+ "dea/file",
"schema": "/v2/schemas/image",
"owner": None,
"min_ram": None,
"min_disk": None,
"disk_format": None,
"virtual_size": None,
- "container_format": None
+ "container_format": None,
+ "os_hash_algo": "sha512",
+ "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+ "os_hidden": False,
+ "protected": False,
}
FAKE_LIST_IMAGES = {
@@ -66,7 +71,10 @@
"size": 13167616,
"min_ram": 0,
"schema": "/v2/schemas/image",
- "virtual_size": None
+ "virtual_size": None,
+ "os_hash_algo": "sha512",
+ "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+ "os_hidden": False
},
{
"status": "active",
@@ -87,7 +95,10 @@
"size": 476704768,
"min_ram": 0,
"schema": "/v2/schemas/image",
- "virtual_size": None
+ "virtual_size": None,
+ "os_hash_algo": "sha512",
+ "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+ "os_hidden": False
}
],
"schema": "/v2/schemas/images",
diff --git a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
index 2faa5be..6b282f4 100644
--- a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
@@ -118,9 +118,17 @@
def test_show_namespace_tag_with_bytes_body(self):
self._test_show_namespace_tag_definition(bytes_body=True)
+ def test_delete_namespace_tag_definition(self):
+ self.check_service_client_function(
+ self.client.delete_namespace_tag,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=204,
+ namespace="OS::Compute::Hypervisor",
+ tag_name="added-sample-tag")
+
def test_delete_all_namespace_tags(self):
self.check_service_client_function(
self.client.delete_namespace_tags,
'tempest.lib.common.rest_client.RestClient.delete',
- {}, status=200,
+ {}, status=204,
namespace="OS::Compute::Hypervisor")
diff --git a/tempest/tests/lib/services/image/v2/test_namespaces_client.py b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
index 4cb9d01..db1ffae 100644
--- a/tempest/tests/lib/services/image/v2/test_namespaces_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
@@ -18,12 +18,105 @@
class TestNamespacesClient(base.BaseServiceTest):
- FAKE_CREATE_SHOW_NAMESPACE = {
- "namespace": "OS::Compute::Hypervisor",
- "visibility": "public",
- "description": "Tempest",
- "display_name": u"\u2740(*\xb4\u25e1`*)\u2740",
- "protected": True
+ FAKE_CREATE_NAMESPACE = {
+ "created_at": "2016-05-19T16:05:48Z",
+ "description": "A metadata definitions namespace.",
+ "display_name": "An Example Namespace",
+ "namespace": "FredCo::SomeCategory::Example",
+ "owner": "c60b1d57c5034e0d86902aedf8c49be0",
+ "protected": True,
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/"
+ "FredCo::SomeCategory::Example",
+ "updated_at": "2016-05-19T16:05:48Z",
+ "visibility": "public"
+ }
+
+ FAKE_SHOW_NAMESPACE = {
+ "created_at": "2016-06-28T14:57:10Z",
+ "description": "The libvirt compute driver options.",
+ "display_name": "libvirt Driver Options",
+ "namespace": "OS::Compute::Libvirt",
+ "owner": "admin",
+ "properties": {
+ "boot_menu": {
+ "description": "If true, enables the BIOS bootmenu.",
+ "enum": [
+ "true",
+ "false"
+ ],
+ "title": "Boot Menu",
+ "type": "string"
+ },
+ "serial_port_count": {
+ "description": "Specifies the count of serial ports.",
+ "minimum": 0,
+ "title": "Serial Port Count",
+ "type": "integer"
+ }
+ },
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2016-06-28T14:57:10Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw_"
+ },
+ {
+ "created_at": "2016-06-28T14:57:10Z",
+ "name": "OS::Nova::Flavor",
+ "prefix": "hw:"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt",
+ "visibility": "public"
+ }
+
+ FAKE_LIST_NAMESPACES = {
+ "first": "/v2/metadefs/namespaces?sort_key=created_at&"
+ "sort_dir=asc",
+ "namespaces": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "description": "OS::Compute::Libvirt",
+ "display_name": "libvirt Driver Options",
+ "namespace": "OS::Compute::Libvirt",
+ "owner": "admin",
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "name": "OS::Glance::Image",
+ "updated_at": "2014-08-28T17:13:06Z"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt",
+ "updated_at": "2014-08-28T17:13:06Z",
+ "visibility": "public"
+ },
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "description": "OS::Compute::Quota",
+ "display_name": "Flavor Quota",
+ "namespace": "OS::Compute::Quota",
+ "owner": "admin",
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "name": "OS::Nova::Flavor",
+ "updated_at": "2014-08-28T17:13:06Z"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Quota",
+ "updated_at": "2014-08-28T17:13:06Z",
+ "visibility": "public"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespaces"
}
FAKE_UPDATE_NAMESPACE = {
@@ -44,15 +137,22 @@
self.check_service_client_function(
self.client.show_namespace,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_CREATE_SHOW_NAMESPACE,
+ self.FAKE_SHOW_NAMESPACE,
bytes_body,
namespace="OS::Compute::Hypervisor")
+ def _test_list_namespaces(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_namespaces,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_NAMESPACES,
+ bytes_body)
+
def _test_create_namespace(self, bytes_body=False):
self.check_service_client_function(
self.client.create_namespace,
'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_CREATE_SHOW_NAMESPACE,
+ self.FAKE_CREATE_NAMESPACE,
bytes_body,
namespace="OS::Compute::Hypervisor",
visibility="public", description="Tempest",
@@ -74,6 +174,12 @@
def test_show_namespace_with_bytes_body(self):
self._test_show_namespace(bytes_body=True)
+ def test_list_namespaces_with_str_body(self):
+ self._test_list_namespaces()
+
+ def test_list_namespaces_with_bytes_body(self):
+ self._test_list_namespaces(bytes_body=True)
+
def test_create_namespace_with_str_body(self):
self._test_create_namespace()
diff --git a/tempest/tests/lib/services/image/v2/test_resource_types_client.py b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
index 2e3b117..089e62e 100644
--- a/tempest/tests/lib/services/image/v2/test_resource_types_client.py
+++ b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
@@ -17,7 +17,7 @@
from tempest.tests.lib.services import base
-class TestResouceTypesClient(base.BaseServiceTest):
+class TestResourceTypesClient(base.BaseServiceTest):
FAKE_LIST_RESOURCETYPES = {
"resource_types": [
{
@@ -48,22 +48,84 @@
]
}
+ FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION = {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw:",
+ "updated_at": "2020-03-07T18:20:44Z"
+ }
+
+ FAKE_LIST_RESOURCE_TYPE_ASSOCIATION = {
+ "resource_type_associations": [
+ {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Nova::Flavor",
+ "prefix": "hw:"
+ },
+ {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw_"
+ }
+ ]
+ }
+
def setUp(self):
- super(TestResouceTypesClient, self).setUp()
+ super(TestResourceTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = resource_types_client.ResourceTypesClient(fake_auth,
'image',
'regionOne')
- def _test_list_resouce_types(self, bytes_body=False):
+ def _test_list_resource_types(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_types,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_RESOURCETYPES,
bytes_body)
- def test_list_resouce_types_with_str_body(self):
- self._test_list_resouce_types()
+ def _test_create_resource_type_association(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION,
+ bytes_body, status=201,
+ namespace_id="OS::Compute::Hypervisor",
+ name="OS::Glance::Image", prefix="hw_",
+ )
- def test_list_resouce_types_with_bytes_body(self):
- self._test_list_resouce_types(bytes_body=True)
+ def _test_list_resource_type_association(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_RESOURCE_TYPE_ASSOCIATION,
+ bytes_body,
+ namespace_id="OS::Compute::Hypervisor",
+ )
+
+ def test_list_resource_types_with_str_body(self):
+ self._test_list_resource_types()
+
+ def test_list_resource_types_with_bytes_body(self):
+ self._test_list_resource_types(bytes_body=True)
+
+ def test_delete_resource_type_association(self):
+ self.check_service_client_function(
+ self.client.delete_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=204,
+ namespace_id="OS::Compute::Hypervisor",
+ resource_name="OS::Glance::Image",
+ )
+
+ def test_create_resource_type_association_with_str_body(self):
+ self._test_create_resource_type_association()
+
+ def test_create_resource_type_association_with_bytes_body(self):
+ self._test_create_resource_type_association(bytes_body=True)
+
+ def test_list_resource_type_association_with_str_body(self):
+ self._test_list_resource_type_association()
+
+ def test_list_resource_type_association_with_bytes_body(self):
+ self._test_list_resource_type_association(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_agents_client.py b/tempest/tests/lib/services/network/test_agents_client.py
new file mode 100644
index 0000000..8904882
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_agents_client.py
@@ -0,0 +1,170 @@
+# Copyright 2018 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import agents_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAgentsClient(base.BaseServiceTest):
+
+ FAKE_AGENT_ID = "d32019d3-bc6e-4319-9c1d-6123f4135a88"
+
+ FAKE_LIST_DATA = {
+ "agents": [
+ {
+ "binary": "neutron-dhcp-agent",
+ "description": None,
+ "availability_zone": "nova",
+ "heartbeat_timestamp": "2017-09-12 19:39:56",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "840d5d68-5759-4e9e-812f",
+ "topic": "dhcp_agent",
+ "host": "agenthost1",
+ "agent_type": "DHCP agent",
+ "started_at": "2017-09-12 19:35:36",
+ "created_at": "2017-09-12 19:35:36",
+ "resources_synced": None,
+ "configurations": {
+ "subnets": 2,
+ "dhcp_lease_duration": 86400,
+ "dhcp_driver": "neutron.agent",
+ "networks": 1,
+ "log_agent_heartbeats": False,
+ "ports": 3
+ }
+ }
+ ]
+ }
+
+ FAKE_SHOW_DATA = {
+ "agent": {
+ "binary": "neutron-openvswitch-agent",
+ "description": None,
+ "availability_zone": None,
+ "heartbeat_timestamp": "2017-09-12 19:40:38",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "04c62b91-b799-48b7-9cd5-2982db6df9c6",
+ "topic": "N/A",
+ "host": "agenthost1",
+ "agent_type": "Open vSwitch agent",
+ "started_at": "2017-09-12 19:35:38",
+ "created_at": "2017-09-12 19:35:38",
+ "resources_synced": True,
+ "configurations": {
+ "ovs_hybrid_plug": True,
+ "in_distributed_mode": False,
+ "datapath_type": "system",
+ "vhostuser_socket_dir": "/var/run/openvswitch",
+ "tunneling_ip": "172.16.78.191",
+ "arp_responder_enabled": False,
+ "devices": 0,
+ "ovs_capabilities": {
+ "datapath_types": [
+ "netdev",
+ "system"
+ ],
+ "iface_types": [
+ "geneve",
+ "gre",
+ "internal",
+ "ipsec_gre",
+ "lisp",
+ "patch",
+ "stt",
+ "system",
+ "tap",
+ "vxlan"
+ ]
+ },
+ "log_agent_heartbeats": False,
+ "l2_population": False,
+ "tunnel_types": [
+ "vxlan"
+ ],
+ "extensions": [],
+ "enable_distributed_routing": False,
+ "bridge_mappings": {
+ "public": "br-ex"
+ }
+ }
+ }
+ }
+
+ FAKE_UPDATE_DATA = {
+ "agent": {
+ "description": "My OVS agent for OpenStack"
+ }
+ }
+
+ def setUp(self):
+ super(TestAgentsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.agents_client = agents_client.AgentsClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_show_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.show_agent,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_SHOW_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_update_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.update_agent,
+ "tempest.lib.common.rest_client.RestClient.put",
+ self.FAKE_UPDATE_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_list_agents(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.list_agents,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_LIST_DATA,
+ bytes_body,
+ status=200)
+
+ def test_show_agent_with_str_body(self):
+ self._test_show_agent()
+
+ def test_show_agent_with_bytes_body(self):
+ self._test_show_agent(bytes_body=True)
+
+ def test_update_agent_with_str_body(self):
+ self._test_update_agent()
+
+ def test_update_agent_with_bytes_body(self):
+ self._test_update_agent(bytes_body=True)
+
+ def test_list_agent_with_str_body(self):
+ self._test_list_agents()
+
+ def test_list_agent_with_bytes_body(self):
+ self._test_list_agents(bytes_body=True)
+
+ def test_delete_agent(self):
+ self.check_service_client_function(
+ self.agents_client.delete_agent,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ agent_id=self.FAKE_AGENT_ID)
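Editor's note: the new service-client unit tests in this change all follow the same shape: the low-level RestClient verb is mocked out, the client method under test is called, and the JSON-decoded result is compared against the fake payload. The following is only a minimal standalone sketch of that pattern; FakeRestClient and FakeAgentsClient are invented names for illustration, not the tempest BaseServiceTest helper.

# Minimal sketch of the mock-and-compare pattern used by these tests;
# FakeRestClient and FakeAgentsClient are hypothetical, for illustration only.
import json
from unittest import mock


class FakeRestClient(object):
    def get(self, url):
        raise NotImplementedError("always patched in the test")


class FakeAgentsClient(object):
    def __init__(self, rest_client):
        self.rest = rest_client

    def show_agent(self, agent_id):
        # A real client would also validate the response schema here.
        _, body = self.rest.get('/v2.0/agents/%s' % agent_id)
        return json.loads(body)


FAKE_SHOW_DATA = {"agent": {"id": "04c62b91", "alive": True}}

rest = FakeRestClient()
client = FakeAgentsClient(rest)
with mock.patch.object(rest, 'get',
                       return_value=(mock.Mock(status=200),
                                     json.dumps(FAKE_SHOW_DATA))):
    body = client.show_agent("04c62b91")
assert body == FAKE_SHOW_DATA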
diff --git a/tempest/tests/lib/services/network/test_qos_client.py b/tempest/tests/lib/services/network/test_qos_client.py
new file mode 100644
index 0000000..b04b847
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_client.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import qos_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosClient(base.BaseServiceTest):
+
+ FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+
+ FAKE_QOS_POLICY_REQUEST = {
+ 'name': 'foo',
+ 'shared': True
+ }
+
+ FAKE_QOS_POLICY_RESPONSE = {
+ 'policy': {
+ "name": "10Mbit",
+ "description": "This policy limits the ports to 10Mbit max.",
+ "rules": [],
+ "id": FAKE_QOS_POLICY_ID,
+ "is_default": False,
+ "project_id": "8d4c70a21fed4aeba121a1a429ba0d04",
+ "revision_number": 1,
+ "tenant_id": "8d4c70a21fed4aeba121a1a429ba0d04",
+ "created_at": "2018-04-03T21:26:39Z",
+ "updated_at": "2018-04-03T21:26:39Z",
+ "shared": False,
+ "tags": ["tag1,tag2"]
+ }
+ }
+
+ FAKE_QOS_POLICIES = {
+ 'policies': [
+ FAKE_QOS_POLICY_RESPONSE['policy']
+ ]
+ }
+
+ def setUp(self):
+ super(TestQosClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.qos_client = qos_client.QosClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_create_qos_policy(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_client.create_qos_policy,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_QOS_POLICY_RESPONSE,
+ bytes_body,
+ 201,
+ **self.FAKE_QOS_POLICY_REQUEST)
+
+ def _test_list_qos_policies(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_client.list_qos_policies,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_QOS_POLICIES,
+ bytes_body,
+ 200)
+
+ def _test_show_qos_policy(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_client.show_qos_policy,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_QOS_POLICY_RESPONSE,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID)
+
+ def _test_update_qos_policy(self, bytes_body=False):
+ update_kwargs = {
+ "name": "100Mbit",
+ "description": "This policy limits the ports to 100Mbit max.",
+ "shared": True
+ }
+
+ resp_body = {
+ "policy": copy.deepcopy(
+ self.FAKE_QOS_POLICY_RESPONSE['policy']
+ )
+ }
+ resp_body["policy"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.qos_client.update_qos_policy,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ **update_kwargs)
+
+ def test_create_qos_policy_with_str_body(self):
+ self._test_create_qos_policy()
+
+ def test_create_qos_policy_with_bytes_body(self):
+ self._test_create_qos_policy(bytes_body=True)
+
+ def test_update_qos_policy_with_str_body(self):
+ self._test_update_qos_policy()
+
+ def test_update_qos_policy_with_bytes_body(self):
+ self._test_update_qos_policy(bytes_body=True)
+
+ def test_show_qos_policy_with_str_body(self):
+ self._test_show_qos_policy()
+
+ def test_show_qos_policy_with_bytes_body(self):
+ self._test_show_qos_policy(bytes_body=True)
+
+ def test_delete_qos_policy(self):
+ self.check_service_client_function(
+ self.qos_client.delete_qos_policy,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID)
+
+ def test_list_qos_policies_with_str_body(self):
+ self._test_list_qos_policies()
+
+ def test_list_qos_policies_with_bytes_body(self):
+ self._test_list_qos_policies(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py
new file mode 100644
index 0000000..8234dda
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import qos_minimum_bandwidth_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosMinimumBandwidthRulesClient(base.BaseServiceTest):
+
+ FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+ FAKE_MIN_BW_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+ FAKE_MIN_BW_RULE_REQUEST = {
+ 'qos_policy_id': FAKE_QOS_POLICY_ID,
+ 'min_kbps': 1000,
+ 'direction': 'ingress'
+ }
+
+ FAKE_MIN_BW_RULE_RESPONSE = {
+ 'minimum_bandwidth_rule': {
+ 'id': FAKE_MIN_BW_RULE_ID,
+ 'min_kbps': 10000,
+ 'direction': 'egress'
+ }
+ }
+
+ FAKE_MIN_BW_RULES = {
+ 'minimum_bandwidth_rules': [
+ FAKE_MIN_BW_RULE_RESPONSE['minimum_bandwidth_rule']
+ ]
+ }
+
+ def setUp(self):
+ super(TestQosMinimumBandwidthRulesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.qos_min_bw_client = qos_minimum_bandwidth_rules_client.\
+ QosMinimumBandwidthRulesClient(fake_auth, "network", "regionOne")
+
+ def _test_create_minimum_bandwidth_rule(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_min_bw_client.create_minimum_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_MIN_BW_RULE_RESPONSE,
+ bytes_body,
+ 201,
+ **self.FAKE_MIN_BW_RULE_REQUEST
+ )
+
+ def _test_list_minimum_bandwidth_rules(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_min_bw_client.list_minimum_bandwidth_rules,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_MIN_BW_RULES,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID
+ )
+
+ def _test_show_minimum_bandwidth_rule(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_min_bw_client.show_minimum_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_MIN_BW_RULE_RESPONSE,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MIN_BW_RULE_ID
+ )
+
+ def _test_update_minimum_bandwidth_rule(self, bytes_body=False):
+ update_kwargs = {
+ "min_kbps": "20000"
+ }
+
+ resp_body = {
+ "minimum_bandwidth_rule": copy.deepcopy(
+ self.FAKE_MIN_BW_RULE_RESPONSE['minimum_bandwidth_rule']
+ )
+ }
+ resp_body["minimum_bandwidth_rule"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.qos_min_bw_client.update_minimum_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MIN_BW_RULE_ID,
+ **update_kwargs)
+
+ def test_create_minimum_bandwidth_rule_with_str_body(self):
+ self._test_create_minimum_bandwidth_rule()
+
+ def test_create_minimum_bandwidth_rule_with_bytes_body(self):
+ self._test_create_minimum_bandwidth_rule(bytes_body=True)
+
+ def test_update_minimum_bandwidth_rule_with_str_body(self):
+ self._test_update_minimum_bandwidth_rule()
+
+ def test_update_minimum_bandwidth_rule_with_bytes_body(self):
+ self._test_update_minimum_bandwidth_rule(bytes_body=True)
+
+ def test_show_minimum_bandwidth_rule_with_str_body(self):
+ self._test_show_minimum_bandwidth_rule()
+
+ def test_show_minimum_bandwidth_rule_with_bytes_body(self):
+ self._test_show_minimum_bandwidth_rule(bytes_body=True)
+
+ def test_delete_minimum_bandwidth_rule(self):
+ self.check_service_client_function(
+ self.qos_min_bw_client.delete_minimum_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MIN_BW_RULE_ID)
+
+ def test_list_minimum_bandwidth_rule_with_str_body(self):
+ self._test_list_minimum_bandwidth_rules()
+
+ def test_list_minimum_bandwidth_rule_with_bytes_body(self):
+ self._test_list_minimum_bandwidth_rules(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_routers_client.py b/tempest/tests/lib/services/network/test_routers_client.py
index 2fa5993..f5dcc7d 100644
--- a/tempest/tests/lib/services/network/test_routers_client.py
+++ b/tempest/tests/lib/services/network/test_routers_client.py
@@ -20,37 +20,78 @@
class TestRoutersClient(base.BaseServiceTest):
FAKE_CREATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": True,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "routes": [],
+ "revision_number": 1,
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
FAKE_UPDATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": False,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "revision_number": 3,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
diff --git a/tempest/tests/lib/services/network/test_segments_client.py b/tempest/tests/lib/services/network/test_segments_client.py
new file mode 100644
index 0000000..579c78f
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_segments_client.py
@@ -0,0 +1,140 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import segments_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestSegmentsClient(base.BaseServiceTest):
+
+ FAKE_SEGMENT_ID = '83a59912-a473-11e9-a012-af494c35c9c2'
+ FAKE_NETWORK_ID = '913ab0e4-a473-11e9-84a3-af1c16fc05de'
+
+ FAKE_SEGMENT_REQUEST = {
+ 'segment': {
+ 'network_id': FAKE_NETWORK_ID,
+ 'segmentation_id': 2000,
+ 'network_type': 'vlan',
+ 'physical_network': 'segment-1'
+ }
+ }
+
+ FAKE_SEGMENT_RESPONSE = {
+ 'segment': {
+ 'name': 'foo',
+ 'network_id': FAKE_NETWORK_ID,
+ 'segmentation_id': 2000,
+ 'network_type': 'vlan',
+ 'physical_network': 'segment-1',
+ 'revision_number': 1,
+ 'id': FAKE_SEGMENT_ID,
+ 'created_at': '2019-07-12T09:13:56Z',
+ 'updated_at': '2019-07-12T09:13:56Z',
+ 'description': 'bar'
+ }
+ }
+
+ FAKE_SEGMENTS = {
+ 'segments': [
+ FAKE_SEGMENT_RESPONSE['segment']
+ ]
+ }
+
+ def setUp(self):
+ super(TestSegmentsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.segments_client = segments_client.SegmentsClient(
+ fake_auth, 'compute', 'regionOne')
+
+ def _test_create_segment(self, bytes_body=False):
+ self.check_service_client_function(
+ self.segments_client.create_segment,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_SEGMENT_RESPONSE,
+ bytes_body,
+ 201,
+ **self.FAKE_SEGMENT_REQUEST['segment']
+ )
+
+ def _test_list_segments(self, bytes_body=False):
+ self.check_service_client_function(
+ self.segments_client.list_segments,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SEGMENTS,
+ bytes_body,
+ 200
+ )
+
+ def _test_show_segment(self, bytes_body=False):
+ self.check_service_client_function(
+ self.segments_client.show_segment,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SEGMENT_RESPONSE,
+ bytes_body,
+ 200,
+ segment_id=self.FAKE_SEGMENT_ID
+ )
+
+ def _test_update_segment(self, bytes_body=False):
+ update_kwargs = {
+ 'name': 'notfoo'
+ }
+
+ resp_body = {
+ 'segment': copy.deepcopy(self.FAKE_SEGMENT_RESPONSE['segment'])
+ }
+ resp_body['segment'].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.segments_client.update_segment,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ 200,
+ segment_id=self.FAKE_SEGMENT_ID,
+ **update_kwargs
+ )
+
+ def test_create_segment_with_str_body(self):
+ self._test_create_segment()
+
+ def test_create_segment_with_bytes_body(self):
+ self._test_create_segment(bytes_body=True)
+
+ def test_update_segment_with_str_body(self):
+ self._test_update_segment()
+
+ def test_update_segment_with_bytes_body(self):
+ self._test_update_segment(bytes_body=True)
+
+ def test_show_segment_with_str_body(self):
+ self._test_show_segment()
+
+ def test_show_segment_with_bytes_body(self):
+ self._test_show_segment(bytes_body=True)
+
+ def test_delete_segment(self):
+ self.check_service_client_function(
+ self.segments_client.delete_segment,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=204,
+ segment_id=self.FAKE_SEGMENT_ID)
+
+ def test_list_segment_with_str_body(self):
+ self._test_list_segments()
+
+ def test_list_segment_with_bytes_body(self):
+ self._test_list_segments(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_versions_client.py b/tempest/tests/lib/services/network/test_versions_client.py
index 026dc6d..188fc31 100644
--- a/tempest/tests/lib/services/network/test_versions_client.py
+++ b/tempest/tests/lib/services/network/test_versions_client.py
@@ -12,63 +12,92 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-
from tempest.lib.services.network import versions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestNetworkVersionsClient(base.BaseServiceTest):
-
- FAKE_INIT_VERSION = {
- "version": {
- "id": "v2.0",
- "links": [
- {
- "href": "http://openstack.example.com/v2.0/",
- "rel": "self"
- },
- {
- "href": "http://docs.openstack.org/",
- "rel": "describedby",
- "type": "text/html"
- }
- ],
- "status": "CURRENT"
- }
- }
+ VERSION = "v2.0"
FAKE_VERSIONS_INFO = {
- "versions": [FAKE_INIT_VERSION["version"]]
- }
-
- FAKE_VERSION_INFO = copy.deepcopy(FAKE_INIT_VERSION)
-
- FAKE_VERSION_INFO["version"]["media-types"] = [
- {
- "base": "application/json",
- "type": "application/vnd.openstack.network+json;version=2.0"
- }
+ "versions": [
+ {
+ "id": "v2.0",
+ "links": [
+ {
+ "href": "http://openstack.example.com/%s/" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "status": "CURRENT"
+ }
]
+ }
+
+ FAKE_VERSION_DETAILS = {
+ "resources": [
+ {
+ "collection": "subnets",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/subnets" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "subnet"
+ },
+ {
+ "collection": "networks",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/networks" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "network"
+ },
+ {
+ "collection": "ports",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/ports" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "port"
+ }
+ ]
+ }
def setUp(self):
super(TestNetworkVersionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
- self.versions_client = (
- versions_client.NetworkVersionsClient
- (fake_auth, 'compute', 'regionOne'))
+ self.versions_client = versions_client.NetworkVersionsClient(
+ fake_auth, 'compute', 'regionOne')
- def _test_versions_client(self, bytes_body=False):
+ def _test_versions_client(self, func, body, bytes_body=False, **kwargs):
self.check_service_client_function(
- self.versions_client.list_versions,
- 'tempest.lib.common.rest_client.RestClient.raw_request',
- self.FAKE_VERSIONS_INFO,
- bytes_body,
- 200)
+ func, 'tempest.lib.common.rest_client.RestClient.raw_request',
+ body, bytes_body, 200, **kwargs)
def test_list_versions_client_with_str_body(self):
- self._test_versions_client()
+ self._test_versions_client(self.versions_client.list_versions,
+ self.FAKE_VERSIONS_INFO)
def test_list_versions_client_with_bytes_body(self):
- self._test_versions_client(bytes_body=True)
+ self._test_versions_client(self.versions_client.list_versions,
+ self.FAKE_VERSIONS_INFO, bytes_body=True)
+
+ def test_show_version_client_with_str_body(self):
+ self._test_versions_client(self.versions_client.show_version,
+ self.FAKE_VERSION_DETAILS,
+ version=self.VERSION)
+
+ def test_show_version_client_with_bytes_body(self):
+ self._test_versions_client(self.versions_client.show_version,
+ self.FAKE_VERSION_DETAILS, bytes_body=True,
+ version=self.VERSION)
diff --git a/tempest/tests/lib/services/object_storage/test_capabilities_client.py b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
index b7f972a..9df7c7c 100644
--- a/tempest/tests/lib/services/object_storage/test_capabilities_client.py
+++ b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
@@ -43,7 +43,7 @@
}
self.check_service_client_function(
self.client.list_capabilities,
- 'tempest.lib.common.rest_client.RestClient.get',
+ 'tempest.lib.common.rest_client.RestClient.raw_request',
resp,
bytes_body)
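Editor's note: the paired *_with_str_body / *_with_bytes_body tests exist because the mocked response body is handed to the client either as text or as UTF-8 bytes, and both must decode to the same structure. A rough standalone illustration of that expectation (the fake payload below is made up, not tempest data):

# Sketch of why each test runs twice: the same JSON must decode
# identically whether the transport returns text or UTF-8 bytes.
import json

fake = {"capabilities": {"swift": {"version": "2.19.0"}}}
text_body = json.dumps(fake)
bytes_body = text_body.encode('utf-8')

# json.loads accepts both str and bytes on Python 3.
assert json.loads(text_body) == json.loads(bytes_body) == fake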
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
index a16d1d7..1749b03 100644
--- a/tempest/tests/lib/services/object_storage/test_object_client.py
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -69,7 +69,7 @@
# If the expected initial status is not 100, then an exception
# should be thrown and the connection closed
- if initial_status is 100:
+ if initial_status == 100:
status, reason = \
self.object_client.create_object_continue(cnt, obj, req_data)
else:
@@ -91,7 +91,7 @@
mock_poc.return_value.endheaders.assert_called_once_with()
# The following steps are only taken if the initial status is 100
- if initial_status is 100:
+ if initial_status == 100:
# Verify that the method returned what it was supposed to
self.assertEqual(status, 201)
diff --git a/tempest/tests/lib/services/placement/__init__.py b/tempest/tests/lib/services/placement/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/lib/services/placement/__init__.py
diff --git a/tempest/tests/lib/services/placement/test_placement_client.py b/tempest/tests/lib/services/placement/test_placement_client.py
new file mode 100644
index 0000000..1396a85
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_placement_client.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement import placement_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestPlacementClient(base.BaseServiceTest):
+ FAKE_ALLOCATION_CANDIDATES = {
+ 'allocation_requests': [
+ {'allocations': {
+ 'rp-uuid': {'resources': {'VCPU': 42}}
+ }}
+ ],
+ 'provider_summaries': {
+ 'rp-uuid': {
+ 'resources': {
+ 'VCPU': {'used': 0, 'capacity': 64},
+ 'MEMORY_MB': {'capacity': 11196, 'used': 0},
+ 'DISK_GB': {'capacity': 19, 'used': 0}
+ },
+ 'traits': ["HW_CPU_X86_SVM"],
+ }
+ }
+ }
+
+ FAKE_ALLOCATIONS = {
+ 'allocations': {
+ 'rp-uuid-1': {
+ 'resources': {
+ 'NET_BW_IGR_KILOBIT_PER_SEC': 1
+ },
+ 'generation': 14
+ },
+ 'rp-uuid2': {
+ 'resources': {
+ 'MEMORY_MB': 256,
+ 'VCPU': 1
+ },
+ 'generation': 9
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestPlacementClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = placement_client.PlacementClient(
+ fake_auth, 'placement', 'regionOne')
+
+ def _test_list_allocation_candidates(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocation_candidates,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATION_CANDIDATES,
+ to_utf=bytes_body,
+ **{'resources1': 'NET_BW_IGR_KILOBIT_PER_SEC:1'})
+
+ def test_list_allocation_candidates_with_str_body(self):
+ self._test_list_allocation_candidates()
+
+ def test_list_allocation_candidates_with_bytes_body(self):
+ self._test_list_allocation_candidates(bytes_body=True)
+
+ def _test_list_allocations(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocations,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATIONS,
+ to_utf=bytes_body,
+ **{'consumer_uuid': 'foo-bar'})
+
+ def test_list_allocations_with_str_body(self):
+ self._test_list_allocations()
+
+ def test_list_allocations_with_bytes_body(self):
+ self._test_list_allocations(bytes_body=True)
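Editor's note: the placement tests above pass query parameters such as resources1='NET_BW_IGR_KILOBIT_PER_SEC:1' straight through as keyword arguments. As a hedged sketch only (the real PlacementClient may assemble its URL differently), such kwargs would typically be URL-encoded onto the GET request:

# Hypothetical illustration; not the actual PlacementClient URL logic.
from urllib.parse import urlencode

params = {'resources1': 'NET_BW_IGR_KILOBIT_PER_SEC:1'}
url = '/allocation_candidates?%s' % urlencode(params)
# -> /allocation_candidates?resources1=NET_BW_IGR_KILOBIT_PER_SEC%3A1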
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
index 1da2112..07af68a 100644
--- a/tempest/tests/lib/services/registry_fixture.py
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -37,8 +37,9 @@
def __init__(self):
"""Initialise the registry fixture"""
self.services = set(['compute', 'identity.v2', 'identity.v3',
- 'image.v1', 'image.v2', 'network', 'volume.v1',
- 'volume.v2', 'volume.v3', 'object-storage'])
+ 'image.v1', 'image.v2', 'network', 'placement',
+ 'volume.v1', 'volume.v2', 'volume.v3',
+ 'object-storage'])
def _setUp(self):
# Cleanup the registry
diff --git a/tempest/tests/lib/services/volume/v3/test_attachments_client.py b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
new file mode 100644
index 0000000..52c94e5
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import attachments_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
+
+class TestAttachmentsClient(base.BaseServiceTest):
+
+ FAKE_ATTACHMENT_INFO = {
+ "attachment": {
+ "status": "attaching",
+ "detached_at": "2015-09-16T09:28:52.000000",
+ "connection_info": {},
+ "attached_at": "2015-09-16T09:28:52.000000",
+ "attach_mode": "ro",
+ "instance": uuids.instance_id,
+ "volume_id": uuids.volume_id,
+ "id": uuids.id,
+ }
+ }
+
+ def setUp(self):
+ super(TestAttachmentsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = attachments_client.AttachmentsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def test_show_attachment(self):
+ self.check_service_client_function(
+ self.client.show_attachment,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ATTACHMENT_INFO, attachment_id=uuids.id)
diff --git a/tempest/tests/lib/services/volume/v3/test_backups_client.py b/tempest/tests/lib/services/volume/v3/test_backups_client.py
index 5412064..97e1132 100644
--- a/tempest/tests/lib/services/volume/v3/test_backups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_backups_client.py
@@ -60,8 +60,11 @@
],
"name": "backup001",
"object_count": 22,
+ "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6",
+ "user_id": "515ba0dd59f84f25a6a084a45d8d93b2",
"size": 1,
"status": "available",
+ "updated_at": "2013-04-02T10:35:27.000000",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": True,
"has_dependent_backups": False
@@ -73,7 +76,16 @@
"backup": {
"id": "4c65c15f-a5c5-464b-b92a-90e4c04636a7",
"name": "fake-backup-name",
- "links": "fake-links"
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ]
}
}
diff --git a/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py b/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
index c788181..7218224 100644
--- a/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
@@ -20,27 +20,35 @@
class TestEncryptionTypesClient(base.BaseServiceTest):
FAKE_CREATE_ENCRYPTION_TYPE = {
"encryption": {
- "id": "cbc36478b0bd8e67e89",
- "name": "FakeEncryptionType",
- "type": "fakeType",
+ "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
+ "control_location": "front-end",
+ "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
+ "key_size": 128,
+ "provider": "luks",
+ "cipher": "aes-xts-plain64"
+ }
+ }
+
+ FAKE_UPDATE_ENCRYPTION_TYPE = {
+ "encryption": {
+ "key_size": 64,
"provider": "LuksEncryptor",
- "cipher": "aes-xts-plain64",
- "key_size": "512",
- "control_location": "front-end"
+ "control_location": "front-end",
+ "cipher": "aes-xts-plain64"
}
}
FAKE_INFO_ENCRYPTION_TYPE = {
- "encryption": {
- "name": "FakeEncryptionType",
- "type": "fakeType",
- "description": "test_description",
- "volume_type": "fakeType",
- "provider": "LuksEncryptor",
- "cipher": "aes-xts-plain64",
- "key_size": "512",
- "control_location": "front-end"
- }
+ "volume_type_id": "cbc36478b0bd8e67e89",
+ "control_location": "front-end",
+ "deleted": False,
+ "created_at": "2015-08-27T09:49:58-05:00",
+ "updated_at": "2015-08-29T09:49:58-05:00",
+ "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
+ "key_size": 128,
+ "provider": "LuksEncryptor",
+ "deleted_at": "2015-08-30T09:49:58-05:00",
+ "cipher": "aes-xts-plain64"
}
FAKE_ENCRYPTION_SPECS_ITEM = {
@@ -50,10 +58,8 @@
def setUp(self):
super(TestEncryptionTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = encryption_types_client.EncryptionTypesClient(fake_auth,
- 'volume',
- 'regionOne'
- )
+ self.client = encryption_types_client.EncryptionTypesClient(
+ fake_auth, 'volume', 'regionOne')
def _test_create_encryption(self, bytes_body=False):
self.check_service_client_function(
@@ -101,3 +107,16 @@
{},
volume_type_id="cbc36478b0bd8e67e89",
status=202)
+
+ def test_update_encryption_type_with_str_body(self):
+ self._test_update_encryption_type()
+
+ def test_update_encryption_type_with_bytes_body(self):
+ self._test_update_encryption_type(bytes_body=True)
+
+ def _test_update_encryption_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_encryption_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_ENCRYPTION_TYPE,
+ bytes_body, volume_type_id="cbc36478b0bd8e67e89")
diff --git a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
index c2784b2..889fd42 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
@@ -20,9 +20,9 @@
class TestGroupSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_GROUP_SNAPSHOT = {
"group_snapshot": {
- "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
- "name": "group-snapshot-001",
- "description": "Test group snapshot 1"
+ "id": "6f519a48-3183-46cf-a32f-41815f816666",
+ "name": "first_group_snapshot",
+ "group_type_id": "58737af7-786b-48b7-ab7c-2447e74b0ef4"
}
}
@@ -34,7 +34,7 @@
"description": "Test group snapshot 1",
"group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
"status": "available",
- "created_at": "20127-06-20T03:50:07Z"
+ "created_at": "2017-06-20T03:50:07Z"
}
}
@@ -102,8 +102,7 @@
resp_body = {
'group_snapshots': [{
'id': group_snapshot['id'],
- 'name': group_snapshot['name'],
- 'group_type_id': group_snapshot['group_type_id']}
+ 'name': group_snapshot['name']}
for group_snapshot in
self.FAKE_LIST_GROUP_SNAPSHOTS['group_snapshots']
]
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index c60cc36..33c7737 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -22,10 +22,13 @@
class TestGroupTypesClient(base.BaseServiceTest):
FAKE_CREATE_GROUP_TYPE = {
"group_type": {
+ "id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "group-type-001",
"description": "Test group type 1",
- "group_specs": {},
"is_public": True,
+ "group_specs": {
+ "consistent_group_snapshot_enabled": "<is> False"
+ }
}
}
@@ -35,7 +38,16 @@
"name": "group-type-001",
"description": "Test group type 1",
"is_public": True,
- "created_at": "20127-06-20T03:50:07Z",
+ "group_specs": {},
+ }
+ }
+
+ FAKE_INFO_DEFAULT_GROUP_TYPE = {
+ "group_type": {
+ "id": "7270c56e-6354-4528-8e8b-f54dee2232c8",
+ "name": "group-type-default",
+ "description": "default group type",
+ "is_public": True,
"group_specs": {},
}
}
@@ -47,24 +59,27 @@
"name": "group-type-001",
"description": "Test group type 1",
"is_public": True,
- "created_at": "2017-06-20T03:50:07Z",
- "group_specs": {},
+ "group_specs": {
+ "consistent_group_snapshot_enabled": "<is> False"
+ }
},
{
"id": "e479997c-650b-40a4-9dfe-77655818b0d2",
"name": "group-type-002",
"description": "Test group type 2",
"is_public": True,
- "created_at": "2017-06-19T01:52:47Z",
- "group_specs": {},
+ "group_specs": {
+ "consistent_group_snapshot_enabled": "<is> False"
+ }
},
{
"id": "c5c4769e-213c-40a6-a568-8e797bb691d4",
"name": "group-type-003",
"description": "Test group type 3",
"is_public": True,
- "created_at": "2017-06-18T06:34:32Z",
- "group_specs": {},
+ "group_specs": {
+ "consistent_group_snapshot_enabled": "<is> False"
+ }
}
]
}
@@ -114,6 +129,13 @@
bytes_body,
group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ def _test_show_default_group_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_default_group_type,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_INFO_DEFAULT_GROUP_TYPE,
+ bytes_body)
+
def _test_list_group_types(self, bytes_body=False):
self.check_service_client_function(
self.client.list_group_types,
@@ -123,15 +145,12 @@
def _test_update_group_types(self, bytes_body=False):
resp_body = copy.deepcopy(self.FAKE_INFO_GROUP_TYPE)
- resp_body['group_type'].pop('created_at')
-
self.check_service_client_function(
self.client.update_group_type,
'tempest.lib.common.rest_client.RestClient.put',
resp_body,
bytes_body,
- group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- name='updated-group-type-name')
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
def _test_create_or_update_group_type_specs(self, bytes_body=False):
group_specs = self.FAKE_CREATE_GROUP_TYPE_SPECS['group_specs']
@@ -192,6 +211,12 @@
def test_show_group_type_with_bytes_body(self):
self._test_show_group_type(bytes_body=True)
+ def test_show_default_group_type_with_str_body(self):
+ self._test_show_default_group_type()
+
+ def test_show_default_group_type_with_bytes_body(self):
+ self._test_show_default_group_type(bytes_body=True)
+
def test_list_group_types_with_str_body(self):
self._test_list_group_types()
diff --git a/tempest/tests/lib/services/volume/v3/test_groups_client.py b/tempest/tests/lib/services/volume/v3/test_groups_client.py
index 918e958..8a2c4ea 100644
--- a/tempest/tests/lib/services/volume/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_groups_client.py
@@ -20,27 +20,22 @@
class TestGroupsClient(base.BaseServiceTest):
FAKE_CREATE_GROUP = {
"group": {
- "name": "group-001",
- "description": "Test group 1",
- "group_type": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
- "volume_types": ["2103099d-7cc3-4e52-a2f1-23a5284416f3"],
- "availability_zone": "az1",
+ "id": "6f519a48-3183-46cf-a32f-41815f816666",
+ "name": "first_group"
}
}
FAKE_CREATE_GROUP_FROM_GROUP_SNAPSHOT = {
- "create-from-src": {
- "name": "group-002",
- "description": "Test group 2",
- "group_snapshot_id": "79c9afdb-7e46-4d71-9249-1f022886963c",
+ "group": {
+ "id": "6f519a48-3183-46cf-a32f-41815f816668",
+ "name": "first_group"
}
}
FAKE_CREATE_GROUP_FROM_GROUP = {
- "create-from-src": {
- "name": "group-003",
- "description": "Test group 3",
- "source_group_id": "e92f9dc7-0b20-492d-8ab2-3ad8fdac270e",
+ "group": {
+ "id": "6f519a48-3183-46cf-a32f-41815f816667",
+ "name": "other_group"
}
}
@@ -64,11 +59,11 @@
"volume_types": ["2103099d-7cc3-4e52-a2f1-23a5284416f3"],
"status": "available",
"availability_zone": "az1",
- "created_at": "20127-06-20T03:50:07Z"
+ "created_at": "2017-06-20T03:50:07Z"
}
}
- FAKE_LIST_GROUPS = {
+ FAKE_LIST_GROUP_DETAILS = {
"groups": [
{
"id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
@@ -105,6 +100,19 @@
]
}
+ FAKE_LIST_GROUPS = {
+ "groups": [
+ {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "name": "group-001",
+ },
+ {
+ "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
+ "name": "group-002",
+ }
+ ]
+ }
+
def setUp(self):
super(TestGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -128,13 +136,21 @@
bytes_body,
group_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ def _test_list_group_details(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_groups,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_GROUP_DETAILS,
+ bytes_body,
+ detail=True)
+
def _test_list_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_groups,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_GROUPS,
bytes_body,
- detail=True)
+ detail=False)
def test_create_group_with_str_body(self):
self._test_create_group()
@@ -148,6 +164,12 @@
def test_show_group_with_bytes_body(self):
self._test_show_group(bytes_body=True)
+ def test_list_group_details_with_str_body(self):
+ self._test_list_group_details()
+
+ def test_list_group_details_with_bytes_body(self):
+ self._test_list_group_details(bytes_body=True)
+
def test_list_groups_with_str_body(self):
self._test_list_groups()
diff --git a/tempest/tests/lib/services/volume/v3/test_hosts_client.py b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
index 09bc0b1..8033e38 100644
--- a/tempest/tests/lib/services/volume/v3/test_hosts_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
@@ -48,7 +48,7 @@
"total_volume_gb": "2",
"total_snapshot_gb": "0",
"project": "(total)",
- "host": "fake-host",
+ "host": "fake-host@rbd",
"snapshot_count": "0"
}
},
@@ -58,7 +58,7 @@
"total_volume_gb": "2",
"total_snapshot_gb": "0",
"project": "f21a9c86d7114bf99c711f4874d80474",
- "host": "fake-host",
+ "host": "fake-host@lvm",
"snapshot_count": "0"
}
}
diff --git a/tempest/tests/lib/services/volume/v3/test_quotas_client.py b/tempest/tests/lib/services/volume/v3/test_quotas_client.py
index aa5d251..f09784c 100644
--- a/tempest/tests/lib/services/volume/v3/test_quotas_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_quotas_client.py
@@ -20,15 +20,26 @@
class TestQuotasClient(base.BaseServiceTest):
FAKE_QUOTAS = {
"quota_set": {
+ "id": '730a1cbd-68ca-4d68-8e09-d603f2dfa72b',
"gigabytes": 5,
"snapshots": 10,
- "volumes": 20
+ "volumes": 20,
+ 'backups': 10,
+ 'groups': 10,
+ 'per_volume_gigabytes': 1000,
+ 'backup_gigabytes': 2000
}
}
- FAKE_UPDATE_QUOTAS_REQUEST = {
+ FAKE_UPDATE_QUOTAS_RESPONSE = {
"quota_set": {
- "security_groups": 45
+ "gigabytes": 6,
+ "snapshots": 11,
+ "volumes": 21,
+ 'backups': 11,
+ 'groups': 11,
+ 'per_volume_gigabytes': 1001,
+ 'backup_gigabytes': 2001
}
}
@@ -57,7 +68,7 @@
self.check_service_client_function(
self.client.update_quota_set,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_QUOTAS_REQUEST,
+ self.FAKE_UPDATE_QUOTAS_RESPONSE,
bytes_body, tenant_id="fake_tenant")
def test_show_default_quota_set_with_str_body(self):
diff --git a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
index 1864f14..7606a52 100644
--- a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
@@ -66,7 +66,7 @@
resp_body = self.FAKE_POOLS_LIST
else:
resp_body = {'pools': [{'name': pool['name']}
- for pool in self.FAKE_POOLS_LIST['pools']]}
+ for pool in self.FAKE_POOLS_LIST['pools']]}
self.check_service_client_function(
self.client.list_pools,
'tempest.lib.common.rest_client.RestClient.get',
diff --git a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
index 2efd2e6..1ea4c65 100644
--- a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
@@ -20,61 +20,85 @@
class TestSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_SNAPSHOT = {
"snapshot": {
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "force": True
+ "created_at": "2019-03-11T16:24:34.469003",
+ "description": "Daily backup",
+ "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e"
}
}
- FAKE_UPDATE_SNAPSHOT_REQUEST = {
- "metadata": {
- "key": "v1"
+ FAKE_UPDATE_SNAPSHOT_RESPONSE = {
+ "snapshot": {
+ "created_at": "2019-03-12T04:53:53.426591",
+ "description": "This is yet, another snapshot.",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-002",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "070c942d-9909-42e9-a467-7a781f150c58"
}
}
FAKE_INFO_SNAPSHOT = {
"snapshot": {
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z"
+ "created_at": "2019-03-12T04:42:00.809352",
+ "description": "Daily backup",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37"
}
}
FAKE_LIST_SNAPSHOTS = {
"snapshots": [
{
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z",
+ "created_at": "2019-03-11T16:24:36.464445",
+ "description": "Daily backup",
+ "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
- "contents": "junk"
- }
- },
- {
- "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
- "display_name": "snap-002",
- "display_description": "Weekly backup",
- "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
- "status": "available",
- "size": 25,
- "created_at": "2012-03-19T01:52:47Z",
- "metadata": {}
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6"
}
]
}
FAKE_SNAPSHOT_METADATA_ITEM = {
+ "metadata": {
+ "key": "value"
+ }
+ }
+
+ FAKE_SNAPSHOT_KEY = {
"meta": {
- "key1": "value1"
+ "key": "new_value"
}
}
@@ -99,7 +123,7 @@
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_INFO_SNAPSHOT,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_list_snapshots(self, bytes_body=False):
self.check_service_client_function(
@@ -113,48 +137,48 @@
self.check_service_client_function(
self.client.create_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- metadata={"key": "v1"})
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ metadata={"key": "value"})
def _test_update_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_UPDATE_SNAPSHOT_RESPONSE,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_show_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
- bytes_body, snapshot_id="cbc36478b0bd8e67e89")
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
+ bytes_body, snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
def _test_show_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_SNAPSHOT_METADATA_ITEM,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
id="key1")
def test_create_snapshot_with_str_body(self):
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
new file mode 100644
index 0000000..336aa32
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -0,0 +1,279 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import types_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestTypesClient(base.BaseServiceTest):
+ FAKE_CREATE_VOLUME_TYPE = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'name': 'vol-type-001',
+ 'description': 'volume type 0001',
+ 'is_public': True,
+ 'os-volume-type-access:is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_DEFAULT_VOLUME_TYPE_INFO = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'qos_specs_id': None,
+ 'name': 'volume-type-test',
+ 'description': 'default volume type',
+ 'is_public': True,
+ 'os-volume-type-access:is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_UPDATE_VOLUME_TYPE = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'name': 'volume-type-test',
+ 'description': 'default volume type',
+ 'is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_VOLUME_TYPES = {
+ 'volume_types': [
+ {
+ 'name': 'volume_type01',
+ 'qos_specs_id': None,
+ 'extra_specs': {
+ 'volume_backend_name': 'lvmdriver-1'
+ },
+ 'os-volume-type-access:is_public': True,
+ 'is_public': True,
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'description': None
+ },
+ {
+ 'name': 'volume_type02',
+ 'qos_specs_id': None,
+ 'extra_specs': {
+ 'volume_backend_name': 'lvmdriver-1'
+ },
+ 'os-volume-type-access:is_public': True,
+ 'is_public': True,
+ 'id': '8eb69a46-df97-4e41-9586-9a40a7533803',
+ 'description': None
+ }
+ ]
+ }
+
+ FAKE_VOLUME_TYPE_EXTRA_SPECS = {
+ 'extra_specs': {
+ 'capabilities': 'gpu'
+ }
+ }
+
+ FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS = {
+ 'capabilities': 'gpu'
+ }
+
+ FAKE_VOLUME_TYPE_ACCESS = {
+ 'volume_type_access': [{
+ 'volume_type_id': '3c67e124-39ad-4ace-a507-8bb7bf510c26',
+ 'project_id': 'f270b245cb11498ca4031deb7e141cfa'
+ }]
+ }
+
+ def setUp(self):
+ super(TestTypesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = types_client.TypesClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_volume_types(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_volume_types,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPES,
+ bytes_body)
+
+ def _test_show_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+ to_utf=bytes_body,
+ volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
+
+ def _test_create_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_VOLUME_TYPE,
+ to_utf=bytes_body,
+ name='volume-type-test')
+
+ def _test_delete_volume_type(self):
+ self.check_service_client_function(
+ self.client.delete_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_list_volume_types_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_volume_types_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_show_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_specs_name='capabilities',
+ to_utf=bytes_body)
+
+ def _test_create_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff",
+ extra_specs=self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body)
+
+ def _test_delete_volume_type_extra_specs(self):
+ self.check_service_client_function(
+ self.client.delete_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_spec_name='volume_backend_name')
+
+ def _test_update_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_VOLUME_TYPE,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ to_utf=bytes_body,
+ name='update-volume-type-test',
+ description='test update volume type description')
+
+ def _test_update_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ extra_spec_name='capabilities',
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_specs=self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body)
+
+ def _test_add_type_access(self):
+ self.check_service_client_function(
+ self.client.add_type_access,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_remove_type_access(self):
+ self.check_service_client_function(
+ self.client.remove_type_access,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_list_type_access(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_type_access,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPE_ACCESS,
+ volume_type_id='3c67e124-39ad-4ace-a507-8bb7bf510c26',
+ to_utf=bytes_body)
+
+ def test_list_volume_types_with_str_body(self):
+ self._test_list_volume_types()
+
+ def test_list_volume_types_with_bytes_body(self):
+ self._test_list_volume_types(bytes_body=True)
+
+ def test_show_volume_type_with_str_body(self):
+ self._test_show_volume_type()
+
+ def test_show_volume_type_with_bytes_body(self):
+ self._test_show_volume_type(bytes_body=True)
+
+ def test_create_volume_type_with_str_body(self):
+ self._test_create_volume_type()
+
+ def test_create_volume_type_with_bytes_body(self):
+ self._test_create_volume_type(bytes_body=True)
+
+ def test_list_volume_types_extra_specs_with_str_body(self):
+ self._test_list_volume_types_extra_specs()
+
+ def test_list_volume_types_extra_specs_with_bytes_body(self):
+ self._test_list_volume_types_extra_specs(bytes_body=True)
+
+ def test_show_volume_type_extra_specs_with_str_body(self):
+ self._test_show_volume_type_extra_specs()
+
+ def test_show_volume_type_extra_specs_with_bytes_body(self):
+ self._test_show_volume_type_extra_specs(bytes_body=True)
+
+ def test_create_volume_type_extra_specs_with_str_body(self):
+ self._test_create_volume_type_extra_specs()
+
+ def test_create_volume_type_extra_specs_with_bytes_body(self):
+ self._test_create_volume_type_extra_specs(bytes_body=True)
+
+ def test_delete_volume_type_extra_specs(self):
+ self._test_delete_volume_type_extra_specs()
+
+ def test_update_volume_type_with_str_body(self):
+ self._test_update_volume_type()
+
+ def test_update_volume_type_with_bytes_body(self):
+ self._test_update_volume_type(bytes_body=True)
+
+ def test_delete_volume_type(self):
+ self._test_delete_volume_type()
+
+ def test_update_volume_type_extra_specs_with_str_body(self):
+ self._test_update_volume_type_extra_specs()
+
+ def test_update_volume_type_extra_specs_with_bytes_body(self):
+ self._test_update_volume_type_extra_specs(bytes_body=True)
+
+ def test_add_type_access(self):
+ self._test_add_type_access()
+
+ def test_remove_type_access(self):
+ self._test_remove_type_access()
+
+ def test_list_type_access_with_str_body(self):
+ self._test_list_type_access()
+
+ def test_list_type_access_with_bytes_body(self):
+ self._test_list_type_access(bytes_body=True)
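
For context on the pattern above: each *_with_str_body / *_with_bytes_body pair drives the same _test_* helper twice, with to_utf toggling whether the fake payload reaches the client as str or as UTF-8 bytes. Stripped of the tempest plumbing, the property being exercised is roughly the following standalone sketch (parse_body is an illustrative stand-in, not tempest code):

import json

FAKE_VOLUME_TYPE = {"volume_type": {"id": "6685584b-1eac-4da6-b5c3-555430cf68ff"}}

def parse_body(raw):
    # RestClient-style decode: accept either str or bytes and return a dict.
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    return json.loads(raw)

str_body = json.dumps(FAKE_VOLUME_TYPE)
bytes_body = str_body.encode('utf-8')
assert parse_body(str_body) == parse_body(bytes_body) == FAKE_VOLUME_TYPE
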
diff --git a/tempest/tests/lib/services/volume/v3/test_versions_client.py b/tempest/tests/lib/services/volume/v3/test_versions_client.py
index 9627b9a..575cae3 100644
--- a/tempest/tests/lib/services/volume/v3/test_versions_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_versions_client.py
@@ -69,6 +69,27 @@
]
}
+ FAKE_VERSION_DETAILS = {
+ "versions": [
+ {
+ "id": "v3.0",
+ "links": [
+ {"href": "https://docs.openstack.org/",
+ "type": "text/html", "rel": "describedby"},
+ {"href": "http://127.0.0.1:44895/v3/", "rel": "self"}
+ ],
+ "media-types": [
+ {"base": "application/json",
+ "type": "application/vnd.openstack.volume+json;version=3"}
+ ],
+ "min_version": "3.0",
+ "status": "CURRENT",
+ "updated": "2018-07-17T00:00:00Z",
+ "version": "3.59"
+ }
+ ]
+ }
+
def setUp(self):
super(TestVersionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -76,6 +97,14 @@
'volume',
'regionOne')
+ def _test_get_base_version_url(self, url, expected_base_url):
+ fake_auth = fake_auth_provider.FakeAuthProvider(fake_base_url=url)
+ client = versions_client.VersionsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+ self.assertEqual(expected_base_url,
+ client._get_base_version_url())
+
def _test_list_versions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_versions,
@@ -84,8 +113,30 @@
bytes_body,
300)
+ def _test_show_version(self, version, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_version,
+ 'tempest.lib.common.rest_client.RestClient.raw_request',
+ self.FAKE_VERSION_DETAILS,
+ bytes_body,
+ 200, version=version)
+
def test_list_versions_with_str_body(self):
self._test_list_versions()
def test_list_versions_with_bytes_body(self):
self._test_list_versions(bytes_body=True)
+
+ def test_show_version_details_with_str_body(self):
+ self._test_show_version('v3')
+
+ def test_show_version_details_with_bytes_body(self):
+ self._test_show_version('v3', bytes_body=True)
+
+ def test_get_base_version_url_app_name(self):
+ self._test_get_base_version_url('https://bar.org/volume/v1/123',
+ 'https://bar.org/volume/')
+ self._test_get_base_version_url('https://bar.org/volume/v2/123',
+ 'https://bar.org/volume/')
+ self._test_get_base_version_url('https://bar.org/volume/v3/123',
+ 'https://bar.org/volume/')
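
The new test_get_base_version_url_app_name cases pin down the expected behaviour: the version segment ('v1', 'v2', 'v3') and everything after it are dropped, and the service prefix is kept. A standalone sketch of that behaviour, assuming a simple regex approach (the actual tempest implementation in the versions client may differ):

import re

def get_base_version_url(base_url):
    # Drop the API version segment and everything after it, keeping the
    # service prefix such as 'https://bar.org/volume/'.
    return re.sub(r'v\d+(\.\d+)?.*$', '', base_url)

assert get_base_version_url('https://bar.org/volume/v3/123') == 'https://bar.org/volume/'
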
diff --git a/tempest/tests/lib/services/volume/v3/test_volumes_client.py b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
index 1250536..56c1a35 100644
--- a/tempest/tests/lib/services/volume/v3/test_volumes_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
@@ -24,27 +24,25 @@
FAKE_VOLUME_SUMMARY = {
"volume-summary": {
- "total_size": 20,
- "total_count": 5
+ "total_size": 4,
+ "total_count": 4,
+ "metadata": {
+ "key1": ["value1", "value2"],
+ "key2": ["value2"]
+ }
}
}
FAKE_VOLUME_METADATA_ITEM = {
"meta": {
- "key1": "value1"
+ "name": "metadata1"
}
}
FAKE_VOLUME_IMAGE_METADATA = {
"metadata": {
- "container_format": "bare",
- "min_ram": "0",
- "disk_format": "raw",
- "image_name": "xly-ubuntu16-server",
- "image_id": "3e087b0c-10c5-4255-b147-6e8e9dbad6fc",
- "checksum": "008f5d22fe3cb825d714da79607a90f9",
- "min_disk": "0",
- "size": "8589934592"
+ "key1": "value1",
+ "key2": "value2"
}
}
diff --git a/tempest/tests/lib/test_api_microversion_fixture.py b/tempest/tests/lib/test_api_microversion_fixture.py
new file mode 100644
index 0000000..ad98ed0
--- /dev/null
+++ b/tempest/tests/lib/test_api_microversion_fixture.py
@@ -0,0 +1,58 @@
+# Copyright 2019 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common import api_microversion_fixture
+from tempest.lib.services.compute import base_compute_client
+from tempest.lib.services.placement import base_placement_client
+from tempest.lib.services.volume import base_client
+from tempest.tests import base
+
+
+class TestAPIMicroversionFixture(base.TestCase):
+ def setUp(self):
+ super(TestAPIMicroversionFixture, self).setUp()
+        # Verify that all the microversions are reset back to None
+        # by the fixture.
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_compute_microversion(self):
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion='2.10'))
+ self.assertEqual('2.10', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_volume_microversion(self):
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ volume_microversion='3.10'))
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_placement_microversion(self):
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ placement_microversion='1.10'))
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_client.VOLUME_MICROVERSION)
+ self.assertEqual('1.10', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_multiple_service_microversion(self):
+ self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion='2.10', volume_microversion='3.10',
+ placement_microversion='1.10'))
+ self.assertEqual('2.10', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_client.VOLUME_MICROVERSION)
+ self.assertEqual('1.10', base_placement_client.PLACEMENT_MICROVERSION)
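
These tests rely on APIMicroversionFixture setting the module-level microversion globals on setup and resetting them to None on cleanup. Below is a toy standalone mirror of that pattern built on the fixtures library (a tempest dependency); it is illustrative only and not the real fixture's code:

import fixtures

MICROVERSION = None  # module-level state, like COMPUTE_MICROVERSION


class MicroversionFixture(fixtures.Fixture):
    """Set a module-level microversion and reset it to None on cleanup."""

    def __init__(self, microversion):
        super(MicroversionFixture, self).__init__()
        self.microversion = microversion

    def _setUp(self):
        global MICROVERSION
        MICROVERSION = self.microversion
        self.addCleanup(self._reset)

    @staticmethod
    def _reset():
        global MICROVERSION
        MICROVERSION = None


with MicroversionFixture('2.10'):
    assert MICROVERSION == '2.10'
assert MICROVERSION is None
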
diff --git a/tempest/tests/lib/test_base.py b/tempest/tests/lib/test_base.py
index 27cda1a..2c16e1c 100644
--- a/tempest/tests/lib/test_base.py
+++ b/tempest/tests/lib/test_base.py
@@ -48,6 +48,7 @@
@classmethod
def setUpClass(cls): # noqa
"""Simulate absence of super() call."""
+ cls.orig_skip_exception = cls.skipException
def setUp(self):
try:
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index 0b1a599..9c6cac7 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
+
import mock
+import six
import testtools
from tempest.lib import base as test
@@ -32,9 +35,17 @@
# By our decorators.attr decorator the attribute __testtools_attrs
# will be set only for 'type' argument, so we test it first.
if 'type' in decorator_args:
- # this is what testtools sets
- self.assertEqual(getattr(foo, '__testtools_attrs'),
- set(expected_attrs))
+ if 'condition' in decorator_args:
+ if decorator_args['condition']:
+ # The expected attrs should be in the function.
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
+ else:
+ # The expected attrs should not be in the function.
+ self.assertNotIn('__testtools_attrs', foo)
+ else:
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
def test_attr_without_type(self):
self._test_attr_helper(expected_attrs='baz', bar='baz')
@@ -50,10 +61,44 @@
def test_attr_decorator_with_duplicated_type(self):
self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
+ def test_attr_decorator_condition_false(self):
+ self._test_attr_helper(None, type='slow', condition=False)
-class TestSkipBecauseDecorator(base.TestCase):
- def _test_skip_because_helper(self, expected_to_skip=True,
- **decorator_args):
+ def test_attr_decorator_condition_true(self):
+ self._test_attr_helper(expected_attrs=['slow'], type='slow',
+ condition=True)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseSkipDecoratorTests(object):
+
+ @abc.abstractmethod
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
+ return
+
+ def test_skip_launchpad_bug(self):
+ self._test_skip_helper(bug='12345')
+
+ def test_skip_storyboard_bug(self):
+ self._test_skip_helper(bug='1992', bug_type='storyboard')
+
+ def test_skip_bug_without_bug_never_skips(self):
+ """Never skip without a bug parameter."""
+ self._test_skip_helper(
+ raise_exception=False, expected_to_skip=False, condition=True)
+ self._test_skip_helper(
+ raise_exception=False, expected_to_skip=False)
+
+ def test_skip_invalid_bug_number(self):
+ """Raise InvalidParam if with an invalid bug number"""
+ self.assertRaises(lib_exc.InvalidParam, self._test_skip_helper,
+ bug='critical_bug')
+
+
+class TestSkipBecauseDecorator(base.TestCase, BaseSkipDecoratorTests):
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@@ -75,38 +120,56 @@
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
- def test_skip_because_launchpad_bug(self):
- self._test_skip_because_helper(bug='12345')
-
def test_skip_because_launchpad_bug_and_condition_true(self):
- self._test_skip_because_helper(bug='12348', condition=True)
+ self._test_skip_helper(bug='12348', condition=True)
def test_skip_because_launchpad_bug_and_condition_false(self):
- self._test_skip_because_helper(expected_to_skip=False,
- bug='12349', condition=False)
-
- def test_skip_because_storyboard_bug(self):
- self._test_skip_because_helper(bug='1992', bug_type='storyboard')
-
- def test_skip_because_storyboard_bug_and_condition_true(self):
- self._test_skip_because_helper(bug='1992', bug_type='storyboard',
- condition=True)
+ self._test_skip_helper(expected_to_skip=False,
+ bug='12349', condition=False)
def test_skip_because_storyboard_bug_and_condition_false(self):
- self._test_skip_because_helper(expected_to_skip=False,
- bug='1992', bug_type='storyboard',
- condition=False)
+ self._test_skip_helper(expected_to_skip=False,
+ bug='1992', bug_type='storyboard',
+ condition=False)
- def test_skip_because_bug_without_bug_never_skips(self):
- """Never skip without a bug parameter."""
- self._test_skip_because_helper(expected_to_skip=False,
- condition=True)
- self._test_skip_because_helper(expected_to_skip=False)
+ def test_skip_because_storyboard_bug_and_condition_true(self):
+ self._test_skip_helper(bug='1992', bug_type='storyboard',
+ condition=True)
- def test_skip_because_invalid_bug_number(self):
- """Raise InvalidParam if with an invalid bug number"""
- self.assertRaises(lib_exc.InvalidParam, self._test_skip_because_helper,
- bug='critical_bug')
+
+class TestUnstableTestDecorator(base.TestCase, BaseSkipDecoratorTests):
+
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
+ fail_test_reason = "test_bar failed"
+
+ class TestFoo(test.BaseTestCase):
+
+ @decorators.unstable_test(**decorator_args)
+ def test_bar(self):
+ if raise_exception:
+ raise Exception(fail_test_reason)
+ else:
+ return 0
+
+ t = TestFoo('test_bar')
+ if expected_to_skip:
+ e = self.assertRaises(testtools.TestCase.skipException, t.test_bar)
+ bug = decorator_args['bug']
+ bug_type = decorator_args.get('bug_type', 'launchpad')
+ self.assertRegex(
+ str(e),
+ r'Marked as unstable and skipped because of bug\: %s.*, '
+ 'failure was: %s' % (decorators._get_bug_url(bug, bug_type),
+ fail_test_reason)
+ )
+ else:
+ # assert that test_bar returned 0
+ self.assertEqual(TestFoo('test_bar').test_bar(), 0)
+
+ def test_skip_bug_given_exception_not_raised(self):
+ self._test_skip_helper(raise_exception=False, expected_to_skip=False,
+ bug='1234')
class TestIdempotentIdDecorator(base.TestCase):
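
The new TestUnstableTestDecorator cases describe the contract of decorators.unstable_test without showing it: a failing test body is turned into a skip that references the tracked bug, while a passing body runs normally. A minimal standalone sketch of such a decorator follows; the bug URL formats and the exact skip message are assumptions here, not copied from tempest:

import functools
import unittest


def unstable_test_sketch(bug, bug_type='launchpad'):
    """Run the test and, if it raises, skip it with a bug reference."""
    if bug_type == 'storyboard':
        bug_url = 'https://storyboard.openstack.org/#!/story/%s' % bug
    else:
        bug_url = 'https://launchpad.net/bugs/%s' % bug

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                raise unittest.SkipTest(
                    'Marked as unstable and skipped because of bug: %s, '
                    'failure was: %s' % (bug_url, exc))
        return wrapper
    return decorator
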
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 37fe646..c849231 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -170,7 +170,8 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_timeout_in_exec_command(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([0, 0, 0], True)
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([0, 0, 0], True))
# Test for a timeout condition immediately raised
client = ssh.Client('localhost', 'root', timeout=2)
@@ -187,7 +188,7 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_command(self):
- chan_mock, poll_mock, select_mock = (
+ chan_mock, poll_mock, select_mock, client_mock = (
self._set_mocks_for_select([[1, 0, 0]], True))
chan_mock.recv_exit_status.return_value = 0
@@ -211,6 +212,8 @@
chan_mock.recv_stderr.assert_called_once_with(1024)
chan_mock.recv_exit_status.assert_called_once_with()
+ client_mock.close.assert_called_once_with()
+
def _set_mocks_for_select(self, poll_data, ito_value=False):
gsc_mock = self.patch('tempest.lib.common.ssh.Client.'
'_get_ssh_connection')
@@ -235,14 +238,15 @@
else:
poll_mock.poll.return_value = poll_data
- return chan_mock, poll_mock, select_mock
+ return chan_mock, poll_mock, select_mock, client_mock
_utf8_string = six.unichr(1071)
_utf8_bytes = _utf8_string.encode("utf-8")
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_good_command_output(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([1, 0, 0])
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([1, 0, 0]))
closed_prop = mock.PropertyMock(return_value=True)
type(chan_mock).closed = closed_prop
@@ -257,7 +261,8 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_bad_command_output(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([1, 0, 0])
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([1, 0, 0]))
closed_prop = mock.PropertyMock(return_value=True)
type(chan_mock).closed = closed_prop
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 9534ce8..7c31185 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -48,6 +48,7 @@
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
+
def test_no_setup_teardown_class_for_tests(self):
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
@@ -100,17 +101,6 @@
'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
"\n"))
- def test_no_vi_headers(self):
- # NOTE(mtreinish) The lines parameter is used only for finding the
- # line location in the file. So these tests just pass a list of an
- # arbitrary length to use for verifying the check function.
- self.assertTrue(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
- self.assertTrue(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
- self.assertFalse(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
-
def test_service_tags_not_in_module_path(self):
self.assertTrue(checks.service_tags_not_in_module_path(
"@utils.services('compute')",
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index fc50736..49fd010 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -532,13 +532,14 @@
# If a skip condition is hit in the test, no credentials or resource
# is provisioned / cleaned-up
self.mocks['skip_checks'].side_effect = (
- testtools.testcase.TestSkipped())
+ testtools.TestCase.skipException())
suite = unittest.TestSuite((self.test,))
log = []
result = LoggingTestResult(log)
suite.run(result)
# If we trigger a skip condition, teardown is not invoked at all
- self.assertEqual(self.SETUP_FIXTURES[:2],
+ self.assertEqual((self.SETUP_FIXTURES[:2] +
+ [self.TEARDOWN_FIXTURES[0]]),
self.test.fixtures_invoked)
def test_skip_credentials_fails(self):
diff --git a/test-requirements.txt b/test-requirements.txt
index 196387c..17a7d2a 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,8 +1,9 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-hacking>=1.1.0,<1.2.0 # Apache-2.0
+hacking>=3.0.1,<3.1.0;python_version>='3.5' # Apache-2.0
mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
+pycodestyle>=2.0.0,<2.6.0 # MIT
flake8-import-order==0.11 # LGPLv3
diff --git a/tools/format.sh b/tools/format.sh
new file mode 100755
index 0000000..ef5cc92
--- /dev/null
+++ b/tools/format.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+cd $(dirname "$(readlink -f "$0")")
+
+AUTOPEP8=`which autopep8 2>/dev/null`
+
+if [[ -z "$AUTOPEP8" ]]; then
+ AUTOPEP8=`which autopep8-3`
+fi
+
+if [[ -z "$AUTOPEP8" ]]; then
+ echo "Unable to locate autopep8" >&2
+ exit 2
+fi
+
+# isort is not compatible with the default flake8 (H306), maybe flake8-isort
+# isort -rc -sl -fss ../tempest ../setup.py
+$AUTOPEP8 --exit-code --max-line-length=79 --experimental --in-place \
+ -r ../tempest ../setup.py
+ERROR=$?
+
+if [[ $ERROR -eq 0 ]]; then
+ echo "Formatting was not needed." >&2
+ exit 0
+elif [[ $ERROR -eq 1 ]]; then
+    echo "Formatting failed." >&2
+ exit 1
+else
+ echo "done" >&2
+fi
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 4eb78fb..5ffef3e 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -19,24 +19,42 @@
#
# In order to function correctly, the environment in which the
# script runs must have
-# * network access to the review.openstack.org Gerrit API
+# * network access to the review.opendev.org Gerrit API
# working directory
-# * network access to https://git.openstack.org/cgit
+# * network access to https://opendev.org/openstack
import json
import re
+import sys
-try:
- # For Python 3.0 and later
- from urllib.error import HTTPError
- import urllib.request as urllib
-except ImportError:
- # Fall back to Python 2's urllib2
- import urllib2 as urllib
- from urllib2 import HTTPError
+import urllib3
+from urllib3.util import retry
+# List of projects whose tempest plugin has been stale or unmaintained for a long time
+# (6 months or more)
+# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
+# when the patches are merged.
+BLACKLIST = [
+    'x/gce-api',  # It looks like gce-api doesn't support python3 yet.
+    'x/glare',  # To avoid sanity-job failure
+    'x/group-based-policy',  # It looks like this doesn't support python3 yet.
+ 'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
+ 'openstack/networking-generic-switch',
+ # https://review.opendev.org/#/c/634846/
+ 'openstack/networking-l2gw-tempest-plugin',
+ # https://review.opendev.org/#/c/635093/
+ 'openstack/networking-midonet', # https://review.opendev.org/#/c/635096/
+ 'x/networking-plumgrid', # https://review.opendev.org/#/c/635096/
+ 'x/networking-spp', # https://review.opendev.org/#/c/635098/
+ 'openstack/neutron-dynamic-routing',
+ # https://review.opendev.org/#/c/637718/
+ 'openstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
+ 'x/tap-as-a-service', # To avoid sanity-job failure
+ 'x/valet', # https://review.opendev.org/#/c/638339/
+ 'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
+]
-url = 'https://review.openstack.org/projects/'
+url = 'https://review.opendev.org/projects/'
# This is what a project looks like
'''
@@ -46,47 +64,65 @@
},
'''
-
-def is_in_openstack_namespace(proj):
- return proj.startswith('openstack/')
-
-# Rather than returning a 404 for a nonexistent file, cgit delivers a
-# 0-byte response to a GET request. It also does not provide a
-# Content-Length in a HEAD response, so the way we tell if a file exists
-# is to check the length of the entire GET response body.
+http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
+retries = retry.Retry(status_forcelist=[500], backoff_factor=1.0)
def has_tempest_plugin(proj):
try:
- r = urllib.urlopen(
- "https://git.openstack.org/cgit/%s/plain/setup.cfg" % proj)
- except HTTPError as err:
- if err.code == 404:
+ r = http.request('GET', "https://opendev.org/%s/raw/branch/"
+ "master/setup.cfg" % proj, retries=retries)
+ if r.status == 404:
return False
+ except urllib3.exceptions.MaxRetryError as err:
+ # We should not ignore non 404 errors.
+ raise err
p = re.compile(r'^tempest\.test_plugins', re.M)
- if p.findall(r.read().decode('utf-8')):
+ if p.findall(r.data.decode('utf-8')):
return True
else:
False
-r = urllib.urlopen(url)
+if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
+ for black_plugin in BLACKLIST:
+ print(black_plugin)
+ # We just need BLACKLIST when we use this `blacklist` option.
+ # So, this exits here.
+ sys.exit()
+
+r = http.request('GET', url, retries=retries)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
-projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
+content = r.data.decode('utf-8')[4:]
+projects = sorted(json.loads(content))
-# Retrieve projects having no deb, puppet, ui or spec namespace as those
+# Retrieve projects having no deployment tool repo (such as deb,
+# puppet, ansible, etc.), infra repos, ui or spec namespace as those
# namespaces do not contain tempest plugins.
projects_list = [i for i in projects if not (
+ i.startswith('openstack-dev/') or
+ i.startswith('openstack-infra/') or
+ i.startswith('openstack/ansible-') or
+ i.startswith('openstack/charm-') or
+ i.startswith('openstack/cookbook-openstack-') or
+ i.startswith('openstack/devstack-') or
+ i.startswith('openstack/fuel-') or
i.startswith('openstack/deb-') or
i.startswith('openstack/puppet-') or
+ i.startswith('openstack/openstack-ansible-') or
+ i.startswith('x/deb-') or
+ i.startswith('x/fuel-') or
+ i.startswith('x/python-') or
+ i.startswith('zuul/') or
i.endswith('-ui') or
i.endswith('-specs'))]
found_plugins = list(filter(has_tempest_plugin, projects_list))
-# Every element of the found_plugins list begins with "openstack/".
-# We drop those initial 10 octets when printing the list.
+# We have tempest plugins not only in the 'openstack/' namespace but also in
+# other namespaces such as 'airship/', 'x/', etc.
+# So, we print all of them here.
for project in found_plugins:
- print(project[10:])
+ print(project)
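
Condensed, the fetch pattern the script now relies on is a pooled urllib3 HTTPS client that retries on HTTP 500 with backoff, where a 404 for setup.cfg simply means the project has no tempest plugin to detect. A minimal sketch of that pattern (fetch_setup_cfg is an illustrative helper name, not part of the patch):

import urllib3
from urllib3.util import retry

http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
retries = retry.Retry(status_forcelist=[500], backoff_factor=1.0)


def fetch_setup_cfg(project):
    # Returns the setup.cfg contents, or None when the project has none.
    r = http.request(
        'GET',
        'https://opendev.org/%s/raw/branch/master/setup.cfg' % project,
        retries=retries)
    return None if r.status == 404 else r.data.decode('utf-8')
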
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index b27b23a..33675ed 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -28,9 +28,9 @@
# * the environment variable git_dir pointing to the location
# * of said git repositories
# ) OR (
-# * network access to the review.openstack.org Gerrit API
+# * network access to the review.opendev.org Gerrit API
# working directory
-# * network access to https://git.openstack.org/cgit
+# * network access to https://opendev.org/openstack
# ))
#
# If a file named doc/source/data/tempest-plugins-registry.header or
@@ -41,8 +41,6 @@
set -ex
(
-declare -A plugins
-
if [[ -r doc/source/data/tempest-plugins-registry.header ]]; then
cat doc/source/data/tempest-plugins-registry.header
fi
@@ -63,28 +61,45 @@
printf " ===\n"
}
+function print_plugin_table {
+ title_underline ${name_col_len}
+ printf "%-3s %-${name_col_len}s %s\n" "SR" "Plugin Name" "URL"
+ title_underline ${name_col_len}
+
+ i=0
+ for plugin in $1; do
+ i=$((i+1))
+ giturl="https://opendev.org/${plugin}"
+ printf "%-3s %-${name_col_len}s %s\n" "$i" "${plugin}" "${giturl}"
+ done
+
+ title_underline ${name_col_len}
+}
+
printf "\n\n"
-title_underline ${name_col_len}
-printf "%-3s %-${name_col_len}s %s\n" "SR" "Plugin Name" "URL"
-title_underline ${name_col_len}
+print_plugin_table "${sorted_plugins}"
-i=0
-for plugin in ${sorted_plugins}; do
- i=$((i+1))
- giturl="git://git.openstack.org/openstack/${plugin}"
- gitlink="https://git.openstack.org/cgit/openstack/${plugin}"
- printf "%-3s %-${name_col_len}s %s\n" "$i" "${plugin}" "\`${giturl} <${gitlink}>\`__"
-done
+printf "\n\n"
-title_underline ${name_col_len}
+# Print BLACKLIST
+if [[ -r doc/source/data/tempest-blacklisted-plugins-registry.header ]]; then
+ cat doc/source/data/tempest-blacklisted-plugins-registry.header
+fi
+
+blacklist=$(python tools/generate-tempest-plugins-list.py blacklist)
+name_col_len=$(echo "${blacklist}" | wc -L)
+name_col_len=$(( name_col_len + 20 ))
+
+printf "\n\n"
+print_plugin_table "${blacklist}"
printf "\n\n"
if [[ -r doc/source/data/tempest-plugins-registry.footer ]]; then
cat doc/source/data/tempest-plugins-registry.footer
fi
-) > doc/source/plugin-registry.rst
+) > doc/source/plugins/plugin-registry.rst
if [[ -n ${1} ]]; then
- cp doc/source/plugin-registry.rst ${1}/doc/source/plugin-registry.rst
+ cp doc/source/plugins/plugin-registry.rst ${1}/doc/source/plugins/plugin-registry.rst
fi
diff --git a/tools/tempest-integrated-gate-compute-blacklist.txt b/tools/tempest-integrated-gate-compute-blacklist.txt
new file mode 100644
index 0000000..8805262
--- /dev/null
+++ b/tools/tempest-integrated-gate-compute-blacklist.txt
@@ -0,0 +1,13 @@
+# This file includes the blacklist of tests which need to be
+# skipped for the Integrated-gate-compute template. The Integrated-gate-compute
+# template needs to run only Nova, Neutron, Cinder and Glance related tests;
+# all other tests will be skipped by the list below.
+
+# Skip keystone and Swift API tests.
+tempest.api.identity
+tempest.api.object_storage
+
+# Skip Swift only scenario tests.
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
+tempest.scenario.test_volume_backup_restore.TestVolumeBackupRestore.test_volume_backup_restore
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-blacklist.txt
new file mode 100644
index 0000000..263b2e4
--- /dev/null
+++ b/tools/tempest-integrated-gate-networking-blacklist.txt
@@ -0,0 +1,19 @@
+# This file includes the blacklist of tests which need to be
+# skipped for the Integrated-gate-networking template.
+
+# Skip Cinder, Glance, keystone and Swift API tests.
+tempest.api.volume
+tempest.api.compute.volumes
+tempest.api.image
+tempest.api.compute.images
+tempest.api.object_storage
+tempest.api.identity
+
+# Skip Cinder, Glance and Swift only scenario tests.
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
diff --git a/tools/tempest-integrated-gate-object-storage-blacklist.txt b/tools/tempest-integrated-gate-object-storage-blacklist.txt
new file mode 100644
index 0000000..c164343
--- /dev/null
+++ b/tools/tempest-integrated-gate-object-storage-blacklist.txt
@@ -0,0 +1,18 @@
+# This file includes the blacklist of tests which need to be
+# skipped for the Integrated-gate-object-storage template. The
+# Integrated-gate-object-storage template needs to run only Swift, Cinder and
+# Glance related tests; all other tests will be skipped by the list below.
+
+# Skip network, compute, keystone API tests.
+tempest.api.network
+tempest.api.compute
+tempest.api.identity
+
+# Skip network, compute, keystone only scenario tests
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
+tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_server_sequence_suspend_resume
+tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
+tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-blacklist.txt
new file mode 100644
index 0000000..efba796
--- /dev/null
+++ b/tools/tempest-integrated-gate-placement-blacklist.txt
@@ -0,0 +1,19 @@
+# This file includes the blacklist of tests which need to be
+# skipped for the Integrated-gate-placement template. The Integrated-gate-placement
+# template needs to run only Nova and Neutron related tests; all other
+# tests will be skipped by the list below.
+
+# Skip Cinder, Glance, keystone and Swift API tests.
+tempest.api.volume
+tempest.api.image
+tempest.api.identity
+tempest.api.object_storage
+
+# Skip Cinder, Glance and Swift only scenario tests.
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
+tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
new file mode 100644
index 0000000..1ef6bb5
--- /dev/null
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -0,0 +1,14 @@
+# This file includes the blacklist of tests which need to be
+# skipped for the Integrated-gate-storage template. The Integrated-gate-storage
+# template needs to run only Cinder, Glance, Swift and Nova related tests;
+# all other tests will be skipped by the list below.
+
+# Skip network, keystone API tests.
+tempest.api.network
+tempest.api.identity
+
+# Skip network only scenario tests.
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 8b4f913..2ff4aea 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -18,20 +18,17 @@
# This script is intended to check the sanity of tempest plugins against
# tempest master.
# What it does:
-# * Creates the virtualenv
-# * Install tempest
# * Retrieve the project lists having tempest plugin if project name is
# given.
-# * For each project in a list, It does:
+# * For each project in a list, it does:
+# * Create virtualenv and install tempest in it
# * Clone the Project
# * Install the Project and also installs dependencies from
# test-requirements.txt.
# * Create Tempest workspace
# * List tempest plugins
# * List tempest plugins tests
-# * Uninstall the project and its dependencies
-# * Again Install tempest
-# * Again repeat the step from cloning project
+# * Delete virtualenv and project repo
#
# If one of the steps fails, the script will exit with failure.
@@ -46,67 +43,73 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
-# List of projects having tempest plugin stale or unmaintained from long time
-BLACKLIST="trio2o"
+
+BLACKLIST="$(python tools/generate-tempest-plugins-list.py blacklist)"
# Function to clone project using zuul-cloner or from git
-function clone_project() {
+function clone_project {
if [ -e /usr/zuul-env/bin/zuul-cloner ]; then
/usr/zuul-env/bin/zuul-cloner --cache-dir /opt/git \
- git://git.openstack.org \
- openstack/"$1"
+ https://opendev.org \
+ "$1"
elif [ -e /usr/bin/git ]; then
- /usr/bin/git clone git://git.openstack.org/openstack/"$1" \
- openstack/"$1"
+ /usr/bin/git clone https://opendev.org/"$1" \
+ "$1"
fi
}
-# Create virtualenv to perform sanity operation
-SANITY_DIR=$(pwd)
-virtualenv "$SANITY_DIR"/.venv
-export TVENV="$SANITY_DIR/tools/with_venv.sh"
-cd "$SANITY_DIR"
+: ${TOX_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
+DEPS="-c${TOX_CONSTRAINTS_FILE}"
-# Install tempest in a venv
-"$TVENV" pip install .
+# function to create virtualenv to perform sanity operation
+function prepare_workspace {
+ SANITY_DIR=$(pwd)
+ virtualenv -p python3 --clear "$SANITY_DIR"/.venv
+ export TVENV="$SANITY_DIR/tools/with_venv.sh"
+ cd "$SANITY_DIR"
+
+ # Install tempest with test dependencies in a venv
+ "$TVENV" pip install -e . -r test-requirements.txt
+}
# Function to install project
-function install_project() {
- "$TVENV" pip install "$SANITY_DIR"/openstack/"$1"
+function install_project {
+ "$TVENV" pip install $DEPS "$SANITY_DIR"/"$1"
# Check for test-requirements.txt file in a project then install it.
- if [ -e "$SANITY_DIR"/openstack/"$1"/test-requirements.txt ]; then
- "$TVENV" pip install -r "$SANITY_DIR"/openstack/"$1"/test-requirements.txt
+ if [ -e "$SANITY_DIR"/"$1"/test-requirements.txt ]; then
+ "$TVENV" pip install $DEPS -r "$SANITY_DIR"/"$1"/test-requirements.txt
fi
}
# Function to perform sanity checking on Tempest plugin
-function tempest_sanity() {
- "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity
- cd "$SANITY_DIR"/tempest_sanity
- "$TVENV" tempest list-plugins
+function tempest_sanity {
+ "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity && \
+ cd "$SANITY_DIR"/tempest_sanity && \
+ "$TVENV" tempest list-plugins && \
"$TVENV" tempest run -l
+ retval=$?
# Delete tempest workspace
+ # NOTE: Cleaning should be done even if an error occurs.
"$TVENV" tempest workspace remove --name tempest_sanity --rmdir
cd "$SANITY_DIR"
-}
-
-# Function to uninstall project
-function uninstall_project() {
- "$TVENV" pip uninstall -y "$SANITY_DIR"/openstack/"$1"
- # Check for *requirements.txt file in a project then uninstall it.
- if [ -e "$SANITY_DIR"/openstack/"$1"/*requirements.txt ]; then
- "$TVENV" pip uninstall -y -r "$SANITY_DIR"/openstack/"$1"/*requirements.txt
- fi
+    # Remove the sanity workspace in case it is left behind
+ rm -fr "$SANITY_DIR"/tempest_sanity
# Remove the project directory after sanity run
- rm -fr "$SANITY_DIR"/openstack/"$1"
+ rm -fr "$SANITY_DIR"/"$1"
+
+ return $retval
}
# Function to run sanity check on each project
-function plugin_sanity_check() {
- clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
- && uninstall_project "$1" && "$TVENV" pip install .
+function plugin_sanity_check {
+ prepare_workspace && \
+ clone_project "$1" && \
+ install_project "$1" && \
+ tempest_sanity "$1"
+
+ return $?
}
# Log status
@@ -117,10 +120,13 @@
# Remove blacklisted tempest plugins
if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
plugin_sanity_check $project && passed_plugin+=", $project" || \
- failed_plugin+=", $project"
+ failed_plugin+="$project, " > $SANITY_DIR/$project.txt
fi
done
+echo "Passed Plugins: $passed_plugin"
+echo "Failed Plugins: $failed_plugin"
+
# Check for failed status
if [[ -n $failed_plugin ]]; then
exit 1
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
new file mode 100755
index 0000000..2596395
--- /dev/null
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+#
+#
+# NOTE(gmann): This script is used in the 'devstack-tempest-ipv6' zuul job to verify that
+# services are deployed on IPv6 properly or not. It will catch cases where devstack or any
+# devstack plugin is missing the required settings to listen on an IPv6 address. It runs as
+# part of the run phase of the zuul job, before the test run. Child jobs of
+# 'devstack-tempest-ipv6' can extend the IPv6 verification specific to their project by
+# defining a new post-run script which will run along with this base script.
+# If there are more common IPv6 verifications, we can always extend this script.
+
+# Keep track of the DevStack directory
+TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
+source $TOP_DIR/stackrc
+source $TOP_DIR/openrc admin admin
+
+function verify_devstack_ipv6_setting {
+ local _service_host=''
+ _service_host=$(echo $SERVICE_HOST | tr -d [])
+ local _host_ipv6=''
+ _host_ipv6=$(echo $HOST_IPV6 | tr -d [])
+ local _service_listen_address=''
+ _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
+ local _service_local_host=''
+ _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
+ if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
+        echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6, which is a must for devstack to deploy services with IPv6 addresses."
+ exit 1
+ fi
+ is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
+ if [[ "$is_service_host_ipv6" != "True" ]]; then
+        echo $SERVICE_HOST "SERVICE_HOST is not an IPv6 address, which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
+ if [[ "$is_host_ipv6" != "True" ]]; then
+        echo $HOST_IPV6 "HOST_IPV6 is not an IPv6 address, which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
+ if [[ "$is_service_listen_address" != "True" ]]; then
+        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not an IPv6 address, which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
+ if [[ "$is_service_local_host" != "True" ]]; then
+        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not an IPv6 address, which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ echo "Devstack is properly configured with IPv6"
+ echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+}
+
+function sanity_check_system_ipv6_enabled {
+ system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())')
+ if [[ $system_ipv6_enabled != "True" ]]; then
+ echo "IPv6 is disabled in system"
+ exit 1
+ fi
+ echo "IPv6 is enabled in system"
+}
+
+function verify_service_listen_address_is_ipv6 {
+ local endpoints_verified=False
+ local all_ipv6=True
+ endpoints=$(openstack endpoint list -f value -c URL)
+ for endpoint in ${endpoints}; do
+ local endpoint_address=''
+ endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
+ endpoint_address=$(echo $endpoint_address | tr -d [])
+ local is_endpoint_ipv6=''
+ is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
+ if [[ "$is_endpoint_ipv6" != "True" ]]; then
+ all_ipv6=False
+            echo $endpoint ": This is not an IPv6 endpoint, which means the corresponding service is not listening on an IPv6 address."
+ continue
+ fi
+ endpoints_verified=True
+ done
+ if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
+ exit 1
+ fi
+    echo "All services deployed by devstack are on IPv6 endpoints"
+ echo $endpoints
+}
+
+# First, verify whether the system has IPv6 enabled or not
+sanity_check_system_ipv6_enabled
+# Verify whether devstack is configured properly with the IPv6 settings
+verify_devstack_ipv6_setting
+# Get all endpoints registered by devstack in keystone and verify that each endpoint address is IPv6.
+verify_service_listen_address_is_ipv6
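
The script shells out to oslo_utils.netutils one value at a time via python3 -c. The same checks in plain Python look like the sketch below; the address is illustrative and the bracket stripping mirrors the tr -d [] calls:

from oslo_utils import netutils

service_host = '[2001:db8::1]'.strip('[]')
print(netutils.is_ipv6_enabled())            # True when the host has IPv6 enabled
print(netutils.is_valid_ipv6(service_host))  # True for a well-formed IPv6 address
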
diff --git a/tox.ini b/tox.ini
index 65960b0..0477d6f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,18 +1,21 @@
[tox]
-envlist = pep8,py36,py27,pip-check-reqs
-minversion = 2.3.1
+envlist = pep8,py36,py38,bashate,pip-check-reqs
+minversion = 3.1.1
skipsdist = True
+ignore_basepython_conflict = True
[tempestenv]
+basepython = python3
sitepackages = False
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
[testenv]
+basepython = python3
setenv =
VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE=1
@@ -20,12 +23,12 @@
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=160
PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
usedevelop = True
install_command = pip install {opts} {packages}
whitelist_externals = *
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
@@ -48,9 +51,13 @@
coverage xml -o cover/coverage.xml
coverage report
+[testenv:debug]
+commands = oslo_debug_helper -t tempest/tests {posargs}
+
[testenv:all]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
@@ -58,22 +65,45 @@
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex {posargs}
+ tempest run --regex {posargs:''}
[testenv:all-plugin]
+# DEPRECATED
+# NOTE(andreaf) The all-plugin tox env uses sitepackages
+# so that plugins installed outside of the Tempest virtual environment
+# can be discovered. Since the goal of moving Tempest plugins into
+# dedicated git repos was completed during the Queens release cycle,
+# this environment should not be used anymore. "all"
+# should be used instead with the appropriate regex filtering.
sitepackages = True
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
+basepython = {[tempestenv]basepython}
+deps = {[tempestenv]deps}
+commands =
+ echo "WARNING: The all-plugin env is deprecated and will be removed"
+    echo "WARNING: Please use the 'all' environment for Tempest plugins."
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex {posargs:''}
+
+[testenv:all-site-packages]
+sitepackages = True
+# 'all' includes slow tests
+setenv =
+ {[tempestenv]setenv}
+ OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
+basepython = {[tempestenv]basepython}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex {posargs}
+ tempest run --regex {posargs:''}
[testenv:full]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
@@ -87,6 +117,7 @@
[testenv:full-parallel]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select all tempest scenario and including the non slow api tests
@@ -94,9 +125,75 @@
find . -type f -name "*.pyc" -delete
tempest run --regex '(^tempest\.scenario.*)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
+[testenv:integrated-network]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select which tests to run and exclude the slow tag and
+# tests listed in blacklist file:
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
+
+[testenv:integrated-compute]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select which tests to run and exclude the slow tag and
+# tests listed in blacklist file:
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
+
+[testenv:integrated-placement]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select which tests to run and exclude the slow tag and
+# tests listed in blacklist file:
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
+
+[testenv:integrated-storage]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select which tests to run and exclude the slow tag and
+# tests listed in blacklist file:
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
+
+[testenv:integrated-object-storage]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select which tests to run and exclude the slow tag and
+# tests listed in blacklist file:
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
+
[testenv:full-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
@@ -109,6 +206,7 @@
[testenv:scenario]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select all scenario tests
@@ -119,6 +217,7 @@
[testenv:smoke]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands =
@@ -128,6 +227,7 @@
[testenv:smoke-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# This is still serial because neutron doesn't work with parallel. See:
@@ -140,6 +240,7 @@
[testenv:slow-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select the slow tagged tests to run serially:
@@ -147,9 +248,22 @@
find . -type f -name "*.pyc" -delete
tempest run --serial --regex '\[.*\bslow\b.*\]' {posargs}
+[testenv:ipv6-only]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# Run only smoke and ipv6 tests. This env is used to test
+# ipv6 deployments and to check that basic tests run fine, so that we can
+# verify that services listen on IPv6 addresses.
+commands =
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex '\[.*\bsmoke|ipv6|test_network_v6\b.*\]' {posargs}
+
[testenv:venv]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands = {posargs}
@@ -157,13 +271,14 @@
[testenv:venv-tempest]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands = {posargs}
[testenv:docs]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands =
@@ -171,21 +286,37 @@
sphinx-build -W -b html doc/source doc/build/html
whitelist_externals = rm
-[testenv:pep8]
+[testenv:pdf-docs]
+deps = {[testenv:docs]deps}
+whitelist_externals =
+ make
commands =
+ sphinx-build -W -b latex doc/source doc/build/pdf
+ make -C doc/build/pdf
+
+[testenv:pep8]
+deps =
+ -r{toxinidir}/test-requirements.txt
+ autopep8
+commands =
+ autopep8 --exit-code --max-line-length=79 --experimental --diff -r tempest setup.py
flake8 {posargs}
check-uuid
+[testenv:autopep8]
+deps = autopep8
+commands =
+ {toxinidir}/tools/format.sh
+
[testenv:uuidgen]
commands =
check-uuid --fix
[hacking]
-local-check-factory = tempest.hacking.checks.factory
import_exceptions = tempest.services
[flake8]
-# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.openstack.org/#/c/36788/
+# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.opendev.org/#/c/36788/
# E123 skipped because it is ignored by default in the default pep8
# E129 skipped because it is too limiting when combined with other rules
# W504 skipped because it is overeager and unnecessary
@@ -195,9 +326,29 @@
enable-extensions = H106,H203,H904
import-order-style = pep8
+[flake8:local-plugins]
+extension =
+ T102 = checks:import_no_clients_in_api_and_scenario_tests
+ T104 = checks:scenario_tests_need_service_tags
+ T105 = checks:no_setup_teardown_class_for_tests
+ T107 = checks:service_tags_not_in_module_path
+ T108 = checks:no_hyphen_at_end_of_rand_name
+ N322 = checks:no_mutable_default_args
+ T109 = checks:no_testtools_skip_decorator
+ T110 = checks:get_resources_on_service_clients
+ T111 = checks:delete_resources_on_service_clients
+ T112 = checks:dont_import_local_tempest_into_lib
+ T113 = checks:use_rand_uuid_instead_of_uuid4
+ T114 = checks:dont_use_config_in_tempest_lib
+ T115 = checks:dont_put_admin_tests_on_nonadmin_path
+ T116 = checks:unsupported_exception_attribute_PY3
+ T117 = checks:negative_test_attribute_always_applied_to_negative_tests
+paths =
+ ./tempest/hacking
+
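
The [flake8:local-plugins] section replaces the removed hacking local-check-factory registration: each Txxx/Nxxx code now maps directly to a check function in tempest/hacking/checks.py. Such a check is a generator that takes the logical line and yields (offset, message) tuples; a toy sketch of the shape (the real T108 regex and message may differ):

import re


def no_hyphen_at_end_of_rand_name(logical_line, filename):
    # Flag rand_name('foo-') style calls where the name ends with a hyphen.
    if re.search(r"rand_name\('[^']*-'\)", logical_line):
        yield (0, "T108: hyphen should not be at the end of rand_name argument")
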
[testenv:releasenotes]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands =
@@ -206,6 +357,19 @@
-b html releasenotes/source releasenotes/build/html
whitelist_externals = rm
+[testenv:bashate]
+# if you want to test out some changes you have made to bashate
+# against tempest, just set BASHATE_INSTALL_PATH=/path/... to your
+# modified bashate tree
+deps =
+ {env:BASHATE_INSTALL_PATH:bashate}
+whitelist_externals = bash
+commands = bash -c "find {toxinidir}/tools \
+ -not \( -type d -name .?\* -prune \) \
+ -type f \
+ -name \*.sh \
+ -print0 | xargs -0 bashate -v -eE005,E042 -i E006"
+
[testenv:pip-check-reqs]
# Do not install test-requirements as that will pollute the virtualenv for
# determining missing packages.