Merge "Modify the fake data"
diff --git a/.gitignore b/.gitignore
index 9767e52..8b6222e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,7 +31,7 @@
!.coveragerc
cover/
doc/source/_static/tempest.conf.sample
-doc/source/plugin-registry.rst
+doc/source/plugins/plugin-registry.rst
# Files created by releasenotes build
releasenotes/build
diff --git a/.zuul.yaml b/.zuul.yaml
index 0035f7c..87e277c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -80,6 +80,7 @@
Integration test of IPv6-only deployments. This job runs
smoke and IPv6 relates tests only. Basic idea is to test
whether OpenStack Services listen on IPv6 addrress or not.
+ timeout: 10800
vars:
tox_envlist: ipv6-only
@@ -100,6 +101,7 @@
devstack_localrc:
ENABLE_FILE_INJECTION: true
ENABLE_VOLUME_MULTIATTACH: true
+ USE_PYTHON3: False
devstack_services:
# NOTE(mriedem): Disable the cinder-backup service from tempest-full
# since tempest-full is in the integrated-gate project template but
@@ -138,6 +140,9 @@
- opendev.org/openstack/oslo.utils
- opendev.org/openstack/oslo.versionedobjects
- opendev.org/openstack/oslo.vmware
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-parallel
@@ -146,11 +151,16 @@
branches:
- master
description: |
- Base integration test with Neutron networking and py27.
+ Base integration test with Neutron networking.
It includes all scenarios as it was in the past.
This job runs all scenario tests in parallel!
+ timeout: 9000
vars:
tox_envlist: full-parallel
+ run_tempest_cleanup: true
+ run_tempest_dry_cleanup: true
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-py3
@@ -332,6 +342,13 @@
nodeset: openstack-two-node-bionic
# This job runs on Bionic from stable/stein on.
branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
- job:
name: tempest-multinode-full
@@ -345,6 +362,13 @@
- stable/pike
- stable/queens
- stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
- job:
name: tempest-multinode-full-py3
@@ -426,9 +450,9 @@
USE_PYTHON3: true
- job:
- name: tempest-full-train
- parent: tempest-full
- override-checkout: stable/train
+ name: tempest-full-ussuri-py3
+ parent: tempest-full-py3
+ override-checkout: stable/ussuri
- job:
name: tempest-full-train-py3
@@ -436,28 +460,11 @@
override-checkout: stable/train
- job:
- name: tempest-full-stein
- parent: tempest-full
- override-checkout: stable/stein
-
-- job:
name: tempest-full-stein-py3
parent: tempest-full-py3
override-checkout: stable/stein
- job:
- name: tempest-full-rocky
- parent: tempest-full
- nodeset: openstack-single-node-xenial
- override-checkout: stable/rocky
-
-- job:
- name: tempest-full-rocky-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-xenial
- override-checkout: stable/rocky
-
-- job:
name: tempest-tox-plugin-sanity-check
parent: tox
description: |
@@ -511,12 +518,13 @@
name: tempest-pg-full
parent: tempest-full
description: |
- Base integration test with Neutron networking and py27 and PostgreSQL.
+ Base integration test with Neutron networking and PostgreSQL.
Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
vars:
devstack_localrc:
ENABLE_FILE_INJECTION: true
DATABASE_TYPE: postgresql
+ USE_PYTHON3: True
- project-template:
name: integrated-gate-networking
@@ -526,11 +534,11 @@
run on neutron gate only.
check:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-networking
gate:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-networking
- project-template:
@@ -542,11 +550,11 @@
run on Nova gate only.
check:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-compute
gate:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-compute
- project-template:
@@ -558,11 +566,11 @@
run on Placement gate only.
check:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-placement
gate:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-placement
- project-template:
@@ -574,11 +582,11 @@
run on Cinder and Glance gate only.
check:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-storage
gate:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-storage
- project-template:
@@ -590,22 +598,19 @@
run on swift gate only.
check:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-object-storage
gate:
jobs:
- - grenade-py3
+ - grenade
- tempest-integrated-object-storage
- project:
templates:
- check-requirements
- - integrated-gate
- integrated-gate-py3
- openstack-cover-jobs
- - openstack-python-jobs
- - openstack-python35-jobs
- - openstack-python3-train-jobs
+ - openstack-python3-victoria-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
@@ -641,20 +646,12 @@
- tempest-full-py3-ipv6:
voting: false
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-train:
+ - tempest-full-ussuri-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-train-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-stein:
- irrelevant-files: *tempest-irrelevant-files
- tempest-full-stein-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-rocky:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-rocky-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-multinode-full:
- irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-tox-plugin-sanity-check:
@@ -673,19 +670,17 @@
# tools/ is not here since this relies on a script in tools/.
- tempest-ipv6-only:
irrelevant-files: *tempest-irrelevant-files-2
- - tempest-slow:
- irrelevant-files: *tempest-irrelevant-files
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - devstack-plugin-ceph-tempest-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- - grenade-py3:
- irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest:
- voting: false
+ - grenade:
irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario001-tempest-centos-7:
voting: false
@@ -701,8 +696,6 @@
irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full:
- irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-test-account-py3:
@@ -719,9 +712,9 @@
irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full:
+ - tempest-full-py3:
irrelevant-files: *tempest-irrelevant-files
- - grenade-py3:
+ - grenade:
irrelevant-files: *tempest-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *tempest-irrelevant-files-2
@@ -739,20 +732,15 @@
irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-lvm-multibackend:
irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- tempest-pg-full:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3-opensuse15:
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
- - tempest-full-train
+ - tempest-full-ussuri-py3
- tempest-full-train-py3
- - tempest-full-stein
- tempest-full-stein-py3
- - tempest-full-rocky
- - tempest-full-rocky-py3
periodic:
jobs:
- tempest-all
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a89ad94..2300763 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,17 +1,19 @@
-If you would like to contribute to the development of OpenStack, you must
-follow the steps in this page:
+The source repository for this project can be found at:
- https://docs.openstack.org/infra/manual/developers.html
+ https://opendev.org/openstack/tempest
-If you already have a good understanding of how the system works and your
-OpenStack accounts are set up, you can skip to the development workflow
-section of this documentation to learn how changes to OpenStack should be
-submitted for review via the Gerrit tool:
+Pull requests submitted through GitHub are not monitored.
- https://docs.openstack.org/infra/manual/developers.html#development-workflow
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
-Pull requests submitted through GitHub will be ignored.
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
-Bugs should be filed on Launchpad, not GitHub:
+Bugs should be filed on Launchpad:
https://bugs.launchpad.net/tempest
+
+For more specific information about contributing to this repository, see the
+Tempest contributor guide:
+
+ https://docs.openstack.org/tempest/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index 204b3c7..95bcbb5 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -12,7 +12,6 @@
tempest/scenario tests
- [T104] Scenario tests require a services decorator
- [T105] Tests cannot use setUpClass/tearDownClass
-- [T106] vim configuration should not be kept in source files.
- [T107] Check that a service tag isn't in the module path
- [T108] Check no hyphen at the end of rand_name() argument
- [T109] Cannot use testtools.skip decorator; instead use
@@ -60,7 +59,7 @@
`relevant plugin projects`_.
.. _External Plugin Interface: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/tempest-external-plugin-interface.html
-.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugin-registry.html#detected-plugins
+.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugins/plugin-registry.html#detected-plugins
Exception Handling
------------------
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 2194dc4..9f38ada 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -3,6 +3,5 @@
# process, which may cause wedges in the gate later.
openstackdocstheme>=1.20.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 7745594..5422af7 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1 +1,4 @@
redirectmatch 301 ^/developer/tempest/(.*) /tempest/latest/$1
+redirectmatch 301 ^/tempest/latest/plugin.html /tempest/latest/plugins/plugin.html
+redirectmatch 301 ^/tempest/latest/plugin-registry.html /tempest/latest/plugins/plugin-registry.html
+redirectmatch 301 ^/tempest/latest/#support-policy /tempest/latest/#stable-branch-support-policy
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..9c79a1f
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,59 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the project-specific information you need to get
+started with Tempest.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at FreeNode
+* Mailing list (prefix subjects with ``[qa]`` for faster responses)
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Tempest Core Team
+<https://review.opendev.org/#/admin/groups/42,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Tempest features are tracked on `Launchpad BP <https://blueprints.launchpad.net/tempest>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/tempest>`_.
+
+If you're looking for a smaller, easier work item to pick up and get started
+on, search for the 'low-hanging-fruit' tag.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+Found an issue and want to make sure we are aware of it? You can report it on
+`Launchpad <https://bugs.launchpad.net/tempest/+filebug>`__.
+More information about Launchpad usage can be found on the `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Tempest require two ``Code-Review +2`` votes from
+Tempest core reviewers before one of the core reviewers can approve the patch by
+giving a ``Workflow +1`` vote. More detailed guidelines for reviewers are available
+at :doc:`../REVIEWING`.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ab994d1..f878888 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -56,6 +56,16 @@
supported_version
+For Contributors
+================
+
+* If you are a new contributor to Tempest, please refer to :doc:`contributor/contributing`
+
+.. toctree::
+ :hidden:
+
+ contributor/contributing
+
Developers Guide
================
@@ -77,8 +87,31 @@
.. toctree::
:maxdepth: 2
- plugin
- plugin-registry
+ plugins/index
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ tempest_and_plugins_compatible_version_policy
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_testing_policy
Library
-------
@@ -88,14 +121,6 @@
library
-Support Policy
---------------
-
-.. toctree::
- :maxdepth: 2
-
- stable_branch_support_policy
-
Search
======
@@ -104,4 +129,4 @@
* :ref:`Tempest document search <search>`: Search the contents of this document.
* `OpenStack wide search <https://docs.openstack.org>`_: Search the wider
- set of OpenStack documentation, including forums.
\ No newline at end of file
+ set of OpenStack documentation, including forums.
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index b4f06e3..50fd4f2 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -414,6 +414,10 @@
.. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+ * `2.73`_
+
+ .. _2.73: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id66
+
* Volume
* `3.3`_
diff --git a/doc/source/plugins/index.rst b/doc/source/plugins/index.rst
new file mode 100644
index 0000000..f961ac7
--- /dev/null
+++ b/doc/source/plugins/index.rst
@@ -0,0 +1,40 @@
+=====================
+Tempest Plugins Guide
+=====================
+
+.. toctree::
+ :maxdepth: 2
+
+ plugin
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../stable_branch_testing_policy
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ ../tempest_and_plugins_compatible_version_policy
+
+Plugins Registry
+----------------
+
+.. toctree::
+ :maxdepth: 2
+
+ plugin-registry
diff --git a/doc/source/plugin.rst b/doc/source/plugins/plugin.rst
similarity index 100%
rename from doc/source/plugin.rst
rename to doc/source/plugins/plugin.rst
diff --git a/doc/source/stable_branch_testing_policy.rst b/doc/source/stable_branch_testing_policy.rst
new file mode 100644
index 0000000..02c5338
--- /dev/null
+++ b/doc/source/stable_branch_testing_policy.rst
@@ -0,0 +1,33 @@
+Stable Branch Testing Policy
+============================
+
+Tempest and its plugins need to support the stable branches
+as per :doc:`Stable Branch Support Policy </stable_branch_support_policy>`.
+
+Because of the branchless model of Tempest and its plugins, all the supported
+stable branches use the master version of Tempest and plugins for their
+testing. That is done in devstack by using the `master branch
+<https://opendev.org/openstack/devstack/src/commit/c104afec7dd72edfd909847bee9c14eaf077a28b/stackrc#L314>`_
+for the Tempest installation. To make sure the master version of Tempest or
+plugins (for any changes or newly added tests) stays compatible with all
+the supported stable branches, Tempest and its plugins need to add
+stable branch jobs on the master gate. That way the stable branches are
+tested against master code, which avoids breaking supported branches
+accidentally.
+
+Example:
+
+* `Stable jobs on Tempest master
+ <https://opendev.org/openstack/tempest/src/commit/e8f1876aa6772077f85f380677b30251c2454505/.zuul.yaml#L646-L651>`_.
+
+* `Stable job on neutron tempest plugins
+ <https://opendev.org/openstack/neutron-tempest-plugin/src/commit/4bc1b00213cf660648cad1916fe6497ac29b2e78/.zuul.yaml#L1427-L1428>`_
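+
+For instance, a stable branch job on the master gate can be as small as the
+following Zuul job definition (this is the Ussuri job from Tempest's own
+``.zuul.yaml``)::
+
+  - job:
+      name: tempest-full-ussuri-py3
+      parent: tempest-full-py3
+      override-checkout: stable/ussuri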
+
+Once any stable branch is moved to the `Extended Maintenance Phases`_
+and devstack starts using an older Tempest version for testing that
+stable branch, we can remove that stable branch job from the master
+gate.
+
+Example: https://review.opendev.org/#/c/722183/
+
+.. _Extended Maintenance Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#extended-maintenance
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 60e7b97..388b4cd 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -1,7 +1,7 @@
Supported OpenStack Releases and Python Versions
================================================
-This Document list the officially supported OpenStack releases
+This document lists the officially supported OpenStack releases
and python versions by Tempest.
Compatible OpenStack Releases
@@ -9,9 +9,9 @@
Tempest master supports the below OpenStack Releases:
+* Ussuri
* Train
* Stein
-* Rocky
For older OpenStack Release:
@@ -24,14 +24,13 @@
* https://releases.openstack.org/stein/index.html#stein-tempest
How to use Tempest tag on Extended Maintenance stable branch:
-* https://review.opendev.org/#/c/681950/
+
+* https://review.opendev.org/#/c/705098/
Supported Python Versions
-------------------------
Tempest master supports the below python versions:
-* Python 2.7
-* Python 3.5
* Python 3.6
* Python 3.7
diff --git a/doc/source/tempest_and_plugins_compatible_version_policy.rst b/doc/source/tempest_and_plugins_compatible_version_policy.rst
new file mode 100644
index 0000000..942b1bd
--- /dev/null
+++ b/doc/source/tempest_and_plugins_compatible_version_policy.rst
@@ -0,0 +1,54 @@
+Tempest and Plugins compatible version policy
+=============================================
+
+Tempest and its plugins are responsible for the integrated
+testing of OpenStack. These tools have two use cases:
+
+#. Testing upstream code at the gate
+#. Testing a production cloud
+
+Upstream code is tested by the master version of branchless Tempest & plugins
+for all supported stable branches in `Maintained phase`_.
+
+A production cloud can be tested by using a compatible version or the master
+version; it depends on the testing strategy of the cloud. To provide
+a compatible version of Tempest and its plugins per OpenStack release,
+we started the coordinated release of all plugins and Tempest per OpenStack
+release.
+These versions are the first set of versions from Tempest and its plugins to
+officially start the support of a particular OpenStack release. For example,
+the first compatible versions for the OpenStack Train release are listed at
+`Tempest plugins version`_.
+
+Because of the branchless nature of Tempest and its plugins, the first version
+released during an OpenStack release is not the last version to support that
+OpenStack release. This means the later (or master) versions can also be used
+for upstream testing as well as in production testing.
+
+Since the `Extended Maintenance policy`_ for stable branches was adopted,
+Tempest has been releasing the ``end of support`` version once a stable
+release is moved to the EM state, which used to happen at the EOL of the
+stable release. This is the last compatible version of Tempest for the
+OpenStack release moved to EM.
+
+Because of the branchless nature explained above, we have a range of versions
+which can be considered compatible with a particular OpenStack release.
+How we should release those versions is described in the table below.
+
+ +-----------------------------+-----------------+------------------------------------+
+ | First compatible version -> | OpenStack 'XYZ' | <- Last compatible version |
+ +=============================+=================+====================================+
+ |This is the latest version | |This is the version released |
+ |released when OpenStack | |when OpenStack 'XYZ' is moved to |
+ |'XYZ' is released. | |EM state. Hash used for this should |
+ |Example: | |be the hash from master at the time |
+ |`Tempest plugins version`_ | |of branch is EM not the one used for|
+ | | |First compatible version |
+ +-----------------------------+-----------------+------------------------------------+
+
+Tempest & the plugins should follow the above mentioned policy for the
+``First compatible version`` and the ``Last compatible version``
+so that we provide the right set of compatible versions for upstream as well
+as for production cloud testing.
+
+.. _Maintained phase: https://docs.openstack.org/project-team-guide/stable-branches.html#maintained
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Tempest plugins version: https://releases.openstack.org/train/#tempest-plugins
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 5f87abd..7ee7411 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -12,8 +12,41 @@
# job provided by the gabbi-tempest plugin. It can be safely ignored
# if that plugin is not being used.
GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
- roles:
- - setup-tempest-run-dir
- - setup-tempest-data-dir
- - acl-devstack-files
- - run-tempest
+ tasks:
+ - name: Setup Tempest Run Directory
+ include_role:
+ name: setup-tempest-run-dir
+
+ - name: Setup Tempest Data Directory
+ include_role:
+ name: setup-tempest-data-dir
+
+ - name: ACL devstack files
+ include_role:
+ name: acl-devstack-files
+
+ - name: Run tempest cleanup init-saved-state
+ include_role:
+ name: tempest-cleanup
+ vars:
+ init_saved_state: true
+ when:
+ - run_tempest_dry_cleanup is defined
+ - run_tempest_cleanup is defined
+
+ - name: Run Tempest
+ include_role:
+ name: run-tempest
+
+ - name: Run tempest cleanup dry-run
+ include_role:
+ name: tempest-cleanup
+ vars:
+ dry_run: true
+ when:
+ - run_tempest_dry_cleanup is defined
+
+ - name: Run tempest cleanup
+ include_role:
+ name: tempest-cleanup
+ when: run_tempest_cleanup is defined
diff --git a/releasenotes/notes/account_generator-6eb03f664a448c35.yaml b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
new file mode 100644
index 0000000..ade632f
--- /dev/null
+++ b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove the deprecated CLI ``tempest-account-generator`` in favor of the
+ ``tempest account-generator`` command.
+ You can use the ``tempest account-generator`` CLI to generate the accounts
+ yaml file.
diff --git a/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
new file mode 100644
index 0000000..a1bd4c5
--- /dev/null
+++ b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - A new config option 'subnet_id' is added to the
+ 'network' section to specify the subnet which should be used
+ for allocation of IPs for VMs created during testing.
+ It should be used when the tested network contains more
+ than one subnet, otherwise the test of external connectivity
+ will fail. (Fixes bug #1856671)
diff --git a/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
new file mode 100644
index 0000000..6b10937
--- /dev/null
+++ b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Add the option ``--worker-file`` to the ``tempest run`` command. This gives
+ tempest more granularity to manually configure how the different sets of
+ tests are grouped to run on different workers. You can configure test
+ regexes to run under specific workers. You can also mix manual scheduling
+ with the standard one by specifying concurrency.
+ For example, the user can set up tempest to run with different concurrencies,
+ to be used with different regexes.
diff --git a/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
new file mode 100644
index 0000000..6bbb381
--- /dev/null
+++ b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - |
+ The ``[compute-feature-enabled]/block_migrate_cinder_iscsi`` option is
+ deprecated ahead of removal in a future release. Once removed, the
+ ``[compute-feature-enabled]/block_migration_for_live_migration``
+ option will be used to determine when to run block migration
+ based tests during live migration.
diff --git a/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
new file mode 100644
index 0000000..58b161f
--- /dev/null
+++ b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The config options ``CONF.compute.spice_console`` and ``CONF.compute.rdp_console``
+ are deprecated because the test cases using them have been removed.
+ They can be added back when the test cases are added again.
diff --git a/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
new file mode 100644
index 0000000..a0ac244
--- /dev/null
+++ b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Python 2.7 support has been dropped. The last release of Tempest
+ to support Python 2.7 is Tempest 23.0.0. The minimum version of Python now
+ supported by Tempest is Python 3.6.
diff --git a/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
new file mode 100644
index 0000000..99ef31e
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+ Remove the support of Python 3.5.
+ Tempest and its plugin dependencies in the Ussuri cycle
+ declare python-requires>=3.6, which forces distros that do
+ not have Python 3.6 to resort to hacks to install it.
+ It is time to drop Python 3.5 from Tempest. The last version
+ of Tempest to support Python 3.5 is 23.0.0.
diff --git a/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
index 6caeacd..56f160a 100644
--- a/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
+++ b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
@@ -1,12 +1,12 @@
---
-prelude: >
+prelude: |
This is an intermediate release during the Ussuri development cycle to
mark the end of support for EM Queens in Tempest.
After this release, Tempest will support below OpenStack Releases:
- * Train
- * Stein
- * Rocky
+ * Train
+ * Stein
+ * Rocky
Current development of Tempest is for OpenStack Ussuri development
cycle.
diff --git a/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
new file mode 100644
index 0000000..a058137
--- /dev/null
+++ b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new attachments client library has been introduced for the volume
+ service.
+
+ Initially only the show_attachment API is provided. This API requires a
+ minimum volume API microversion of ``3.27``.
diff --git a/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
new file mode 100644
index 0000000..936bf1f
--- /dev/null
+++ b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Remove the deprecated argument ``os-tenant-name`` (or ``OS_TENANT_NAME``) in favour of
+ the ``os-project-name`` argument.
diff --git a/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
new file mode 100644
index 0000000..37e56bb
--- /dev/null
+++ b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
@@ -0,0 +1,16 @@
+---
+prelude: >
+ This release is to tag Tempest for the OpenStack Ussuri release.
+ This release marks the start of Ussuri release support in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Ussuri
+ * Train
+ * Stein
+
+ Current development of Tempest is for OpenStack Victoria development
+ cycle. Every Tempest commit is also tested against master during
+ the Victoria cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Ussuri (or future release)
+ cloud.
+ To be on the safe side, use this tag to test the OpenStack Ussuri release.
diff --git a/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
new file mode 100644
index 0000000..ce401ff
--- /dev/null
+++ b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove the deprecated CLI ``verify-tempest-config`` in favour of the
+ ``tempest verify-config`` command.
+ You can use the ``tempest verify-config`` CLI to verify the tempest
+ conf file.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index bfd8b2d..d8702f9 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ v24.0.0
v23.0.0
v22.1.0
v22.0.0
diff --git a/releasenotes/source/v24.0.0.rst b/releasenotes/source/v24.0.0.rst
new file mode 100644
index 0000000..8131975
--- /dev/null
+++ b/releasenotes/source/v24.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v24.0.0 Release Notes
+=====================
+
+.. release-notes:: 24.0.0 Release Notes
+ :version: 24.0.0
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
index 3724e0e..e3a0a0e 100644
--- a/roles/process-stackviz/tasks/main.yaml
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -17,13 +17,18 @@
when: not subunit_input.stat.exists
- name: Install stackviz
- pip:
- name: "file://{{ stackviz_archive.stat.path }}"
- virtualenv: /tmp/stackviz
- extra_args: -U
when:
- stackviz_archive.stat.exists
- subunit_input.stat.exists
+ block:
+ - include_role:
+ name: ensure-pip
+
+ - pip:
+ name: "file://{{ stackviz_archive.stat.path }}"
+ virtualenv: /tmp/stackviz
+ virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
+ extra_args: -U
- name: Deploy stackviz static html+js
command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index d4b253a..3643edb 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -1,5 +1,8 @@
Run Tempest
+The result of the tempest run is stored in the ``tempest_run_result``
+variable (through the ``register`` statement).
+
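+For example, a later task in the same playbook could inspect the registered
+result (illustrative only; this task is not part of the role)::
+
+  - debug:
+      msg: "Tempest exited with return code {{ tempest_run_result.rc }}"
+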
**Role Variables**
.. zuul:rolevar:: devstack_base_dir
@@ -20,14 +23,12 @@
It works only when used with some specific tox environments
('all', 'all-plugin'.)
- Multi-line and commented regexs can be achieved by doing this:
+ In the following example only the api, scenario and thirdparty tests
+ will be executed.
::
vars:
- tempest_test_regex: |
- (?x) # Ignore comments and whitespaces
- # Line with only a comment.
- (tempest\.(api|scenario|thirdparty)).*$ # Run only api scenario and third party
+ tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
.. zuul:rolevar:: tempest_test_blacklist
@@ -48,14 +49,9 @@
It works only when used with some specific tox environments
('all', 'all-plugin'.)
- Multi-line and commented regexs can be achieved by doing this:
-
::
vars:
- tempest_black_regex: |
- (?x) # Ignore comments and whitespaces
- # Line with only a comment.
- (tempest.api.identity).*$
+ tempest_black_regex: (tempest.api.identity).*$
.. zuul:rolevar:: tox_extra_args
:default: ''
@@ -72,3 +68,16 @@
:default: ''
The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+ :default: ''
+
+ Upper constraints file to be used for stable branches up to stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+ :default: {}
+
+ Environment variables to set for the run-tempest task.
+
+ Env variables set in this variable will be combined with some more
+ default env variables set at runtime.
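+
+  For example (the variable shown is only illustrative; anything set here is
+  merged with the defaults the role computes at runtime)::
+
+    vars:
+      tempest_tox_environment:
+        OS_TEST_TIMEOUT: '1200'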
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 79df3e1..5867b6c 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -4,3 +4,6 @@
tempest_black_regex: ''
tox_extra_args: ''
tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 24bd4db..1de3725 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -20,6 +20,22 @@
default_concurrency: "{{ num_cores|int // 2 }}"
when: num_cores|int > 3
+- name: Override target branch
+ set_fact:
+ target_branch: "{{ zuul.override_checkout }}"
+ when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/rocky
+ set_fact:
+ # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky"]
+
+- name: Set OS_TEST_TIMEOUT if requested
+ set_fact:
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+ when: tempest_test_timeout != ''
+
- when:
- tempest_test_blacklist is defined
block:
@@ -40,6 +56,7 @@
--black-regex={{tempest_black_regex|quote}}
args:
chdir: "{{devstack_base_dir}}/tempest"
+ register: tempest_run_result
become: true
become_user: tempest
- environment: '{{ {"OS_TEST_TIMEOUT": tempest_test_timeout} if tempest_test_timeout else {} }}'
+ environment: "{{ tempest_tox_environment }}"
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
new file mode 100644
index 0000000..70719ca
--- /dev/null
+++ b/roles/tempest-cleanup/README.rst
@@ -0,0 +1,33 @@
+Tempest cleanup
+===============
+
+Documentation regarding tempest cleanup can be found at the following
+link:
+https://docs.openstack.org/tempest/latest/cleanup.html
+
+When init_saved_state and dry_run variables are set to false, the role
+execution will run tempest cleanup which deletes resources not present in the
+saved_state.json file.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: init_saved_state
+ :default: false
+
+ When true, tempest cleanup --init-saved-state will be executed which
+ initializes the saved state of the OpenStack deployment and will output
+ a saved_state.json file containing resources from the deployment that will
+ be preserved from the cleanup command. This should be done prior to running
+ Tempest tests.
+
+.. zuul:rolevar:: dry_run
+ :default: false
+
+ When true, tempest cleanup creates a report (./dry_run.json) of the
+ resources that would be cleaned up if the role were run with the dry_run
+ option set to false.
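+
+For example, a playbook can invoke the role like this (an illustrative sketch
+mirroring how ``playbooks/devstack-tempest.yaml`` uses it)::
+
+  - name: Run tempest cleanup dry-run
+    include_role:
+      name: tempest-cleanup
+    vars:
+      dry_run: true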
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
new file mode 100644
index 0000000..fc1948a
--- /dev/null
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+init_saved_state: false
+dry_run: false
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
new file mode 100644
index 0000000..5444afc
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -0,0 +1,31 @@
+- when: init_saved_state
+ block:
+ - name: Run tempest cleanup init-saved-state
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --init-saved-state --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+
+ - name: Cat saved_state.json
+ command: cat "{{ devstack_base_dir }}/tempest/saved_state.json"
+
+- when: dry_run
+ block:
+ - name: Run tempest cleanup dry-run
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+
+ - name: Cat dry_run.json
+ command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
+
+- name: Run tempest cleanup
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+ when: not dry_run and not init_saved_state
diff --git a/setup.cfg b/setup.cfg
index 1e9b8e9..18427a2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,6 +6,7 @@
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
+python-requires = >=3.6
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
@@ -13,12 +14,12 @@
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: Implementation :: CPython
[files]
packages =
@@ -28,8 +29,6 @@
[entry_points]
console_scripts =
- verify-tempest-config = tempest.cmd.verify_tempest_config:main
- tempest-account-generator = tempest.cmd.account_generator:main
tempest = tempest.cmd.main:main
skip-tracker = tempest.lib.cmd.skip_tracker:main
check-uuid = tempest.lib.cmd.check_uuid:run
@@ -50,5 +49,3 @@
oslo.config.opts =
tempest.config = tempest.config:list_opts
-[wheel]
-universal = 1
diff --git a/setup.py b/setup.py
index 566d844..f63cc23 100644
--- a/setup.py
+++ b/setup.py
@@ -16,14 +16,6 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index a6e0efa..d5adfed 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -144,6 +144,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
def test_aggregate_add_existent_host(self):
+ # Adding an already existing host to an aggregate should fail.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate = self._create_test_aggregate()
@@ -172,6 +173,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
def test_aggregate_remove_nonexistent_host(self):
+ # Removing a nonexistent host from an aggregate should fail.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.NotFound, self.client.remove_host,
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 1483c2e..f42f53a 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -46,6 +46,7 @@
@decorators.idempotent_id('8b4330e1-12c4-4554-9390-e6639971f086')
def test_create_flavor_with_int_id(self):
+ """Test creating flavor with id of type integer"""
flavor_id = data_utils.rand_int_id(start=1000)
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -55,6 +56,7 @@
@decorators.idempotent_id('94c9bb4e-2c2a-4f3c-bb1f-5f0daf918e6d')
def test_create_flavor_with_uuid_id(self):
+ """Test creating flavor with id of type uuid"""
flavor_id = data_utils.rand_uuid()
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -64,8 +66,11 @@
@decorators.idempotent_id('f83fe669-6758-448a-a85e-32d351f36fe0')
def test_create_flavor_with_none_id(self):
- # If nova receives a request with None as flavor_id,
- # nova generates flavor_id of uuid.
+ """Test creating flavor without id specified
+
+ If nova receives a request with None as flavor_id,
+ nova generates flavor_id of uuid.
+ """
flavor_id = None
new_flavor_id = self.create_flavor(ram=self.ram,
vcpus=self.vcpus,
@@ -75,8 +80,10 @@
@decorators.idempotent_id('8261d7b0-be58-43ec-a2e5-300573c3f6c5')
def test_create_flavor_verify_entry_in_list_details(self):
- # Create a flavor and ensure it's details are listed
- # This operation requires the user to have 'admin' role
+ """Create a flavor and ensure its details are listed
+
+ This operation requires the user to have 'admin' role
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -94,9 +101,10 @@
@decorators.idempotent_id('63dc64e6-2e79-4fdf-868f-85500d308d66')
def test_create_list_flavor_without_extra_data(self):
- # Create a flavor and ensure it is listed
- # This operation requires the user to have 'admin' role
+ """Create a flavor and ensure it is listed
+ This operation requires the user to have 'admin' role
+ """
def verify_flavor_response_extension(flavor):
# check some extensions for the flavor create/show/detail response
self.assertEqual(flavor['swap'], '')
@@ -134,10 +142,12 @@
@decorators.idempotent_id('be6cc18c-7c5d-48c0-ac16-17eaf03c54eb')
def test_list_non_public_flavor(self):
- # Create a flavor with os-flavor-access:is_public false.
- # The flavor should not be present in list_details as the
- # tenant is not automatically added access list.
- # This operation requires the user to have 'admin' role
+ """Create a flavor with os-flavor-access:is_public false.
+
+ The flavor should not be present in list_details as the
+ tenant is not automatically added access list.
+ This operation requires the user to have 'admin' role
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -156,7 +166,7 @@
@decorators.idempotent_id('bcc418ef-799b-47cc-baa1-ce01368b8987')
def test_create_server_with_non_public_flavor(self):
- # Create a flavor with os-flavor-access:is_public false
+ """Create a flavor with os-flavor-access:is_public false"""
flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
disk=self.disk,
is_public="False")
@@ -169,8 +179,10 @@
@decorators.idempotent_id('b345b196-bfbd-4231-8ac1-6d7fe15ff3a3')
def test_list_public_flavor_with_other_user(self):
- # Create a Flavor with public access.
- # Try to List/Get flavor with another user
+ """Create a Flavor with public access.
+
+ Try to List/Get flavor with another user
+ """
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
# Create the flavor
@@ -184,6 +196,7 @@
@decorators.idempotent_id('fb9cbde6-3a0e-41f2-a983-bdb0a823c44e')
def test_is_public_string_variations(self):
+ """Test creating public and non public flavors"""
flavor_name_not_public = data_utils.rand_name(self.flavor_name_prefix)
flavor_name_public = data_utils.rand_name(self.flavor_name_prefix)
@@ -215,6 +228,7 @@
@decorators.idempotent_id('3b541a2e-2ac2-4b42-8b8d-ba6e22fcd4da')
def test_create_flavor_using_string_ram(self):
+ """Test creating flavor with ram of type string"""
new_flavor_id = data_utils.rand_int_id(start=1000)
ram = "1024"
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4d27a22..4c531b3 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -61,10 +61,13 @@
@decorators.idempotent_id('0b2f9d4b-1ca2-4b99-bb40-165d4bb94208')
def test_flavor_set_get_update_show_unset_keys(self):
- # Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
- # spec as a user with admin privileges.
+ """Test flavor extra spec operations by admin user
+
+ Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
+ spec as a user with admin privileges.
+ """
# Assigning extra specs values that are to be set
- specs = {"key1": "value1", "key2": "value2"}
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
# SET extra specs to the flavor created in setUp
set_body = self.admin_flavors_client.set_flavor_extra_spec(
self.flavor['id'], **specs)['extra_specs']
@@ -74,30 +77,34 @@
self.flavor['id'])['extra_specs'])
self.assertEqual(get_body, specs)
- # UPDATE the value of the extra specs key1
- update_body = \
- self.admin_flavors_client.update_flavor_extra_spec(
- self.flavor['id'], "key1", key1="value")
- self.assertEqual({"key1": "value"}, update_body)
+ # UPDATE the value of the extra specs 'hw:numa_nodes'
+ update_body = self.admin_flavors_client.update_flavor_extra_spec(
+ self.flavor['id'], "hw:numa_nodes", **{'hw:numa_nodes': '2'})
+ self.assertEqual({'hw:numa_nodes': '2'}, update_body)
- # GET extra specs and verify the value of the key2
+ # GET extra specs and verify the value of the 'hw:cpu_policy'
# is the same as before
get_body = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor['id'])['extra_specs']
- self.assertEqual(get_body, {"key1": "value", "key2": "value2"})
+ self.assertEqual(
+ get_body, {'hw:numa_nodes': '2', 'hw:cpu_policy': 'shared'}
+ )
# UNSET extra specs that were set in this test
- self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
- "key1")
- self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
- "key2")
+ self.admin_flavors_client.unset_flavor_extra_spec(
+ self.flavor['id'], 'hw:numa_nodes'
+ )
+ self.admin_flavors_client.unset_flavor_extra_spec(
+ self.flavor['id'], 'hw:cpu_policy'
+ )
get_body = self.admin_flavors_client.list_flavor_extra_specs(
self.flavor['id'])['extra_specs']
self.assertEmpty(get_body)
@decorators.idempotent_id('a99dad88-ae1c-4fba-aeb4-32f898218bd0')
def test_flavor_non_admin_get_all_keys(self):
- specs = {"key1": "value1", "key2": "value2"}
+ """Test non admin user getting all flavor extra spec keys"""
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
self.admin_flavors_client.set_flavor_extra_spec(self.flavor['id'],
**specs)
body = (self.flavors_client.list_flavor_extra_specs(
@@ -108,11 +115,15 @@
@decorators.idempotent_id('12805a7f-39a3-4042-b989-701d5cad9c90')
def test_flavor_non_admin_get_specific_key(self):
+ """Test non admin user getting specific flavor extra spec key"""
+ specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
body = self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")['extra_specs']
- self.assertEqual(body['key1'], 'value1')
- self.assertIn('key2', body)
+ self.flavor['id'], **specs
+ )['extra_specs']
+ self.assertEqual(body['hw:numa_nodes'], '1')
+ self.assertIn('hw:cpu_policy', body)
+
body = self.flavors_client.show_flavor_extra_spec(
- self.flavor['id'], 'key1')
- self.assertEqual(body['key1'], 'value1')
- self.assertNotIn('key2', body)
+ self.flavor['id'], 'hw:numa_nodes')
+ self.assertEqual(body['hw:numa_nodes'], '1')
+ self.assertNotIn('hw:cpu_policy', body)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 5cde39e..721acca 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -64,70 +64,82 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
def test_flavor_non_admin_set_keys(self):
- # Test to SET flavor extra spec as a user without admin privileges.
+ """Test to SET flavor extra spec as a user without admin privileges"""
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.set_flavor_extra_spec,
self.flavor['id'],
- key1="value1", key2="value2")
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1ebf4ef8-759e-48fe-a801-d451d80476fb')
def test_flavor_non_admin_update_specific_key(self):
- # non admin user is not allowed to update flavor extra spec
+ """non admin user is not allowed to update flavor extra spec"""
body = self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")['extra_specs']
- self.assertEqual(body['key1'], 'value1')
+ self.flavor['id'],
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+ )['extra_specs']
+ self.assertEqual(body['hw:numa_nodes'], '1')
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.
update_flavor_extra_spec,
self.flavor['id'],
- 'key1',
- key1='value1_new')
+ 'hw:numa_nodes',
+ **{'hw:numa_nodes': '1'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('28f12249-27c7-44c1-8810-1f382f316b11')
def test_flavor_non_admin_unset_keys(self):
+ """non admin user is not allowed to unset flavor extra spec"""
self.admin_flavors_client.set_flavor_extra_spec(
- self.flavor['id'], key1="value1", key2="value2")
+ self.flavor['id'],
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+ )
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.unset_flavor_extra_spec,
self.flavor['id'],
- 'key1')
+ 'hw:numa_nodes')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('440b9f3f-3c7f-4293-a106-0ceda350f8de')
def test_flavor_unset_nonexistent_key(self):
+ """Unsetting non existence flavor extra spec key should fail"""
self.assertRaises(lib_exc.NotFound,
self.admin_flavors_client.unset_flavor_extra_spec,
self.flavor['id'],
- 'nonexistent_key')
+ 'hw:cpu_thread_policy')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('329a7be3-54b2-48be-8052-bf2ce4afd898')
def test_flavor_get_nonexistent_key(self):
+ """Getting non existence flavor extra spec key should fail"""
self.assertRaises(lib_exc.NotFound,
self.flavors_client.show_flavor_extra_spec,
self.flavor['id'],
- "nonexistent_key")
+ 'hw:cpu_thread_policy')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('25b822b8-9f49-44f6-80de-d99f0482e5cb')
def test_flavor_update_mismatch_key(self):
- # the key will be updated should be match the key in the body
+ """Updating unmatched flavor extra spec key should fail
+
+ The key to be updated should match the key in the body
+ """
self.assertRaises(lib_exc.BadRequest,
self.admin_flavors_client.update_flavor_extra_spec,
self.flavor['id'],
- "key2",
- key1="value")
+ 'hw:numa_nodes',
+ **{'hw:cpu_policy': 'shared'})
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f5889590-bf66-41cc-b4b1-6e6370cfd93f')
def test_flavor_update_more_key(self):
- # there should be just one item in the request body
+ """Updating multiple flavor spec keys should fail
+
+ There should be just one item in the request body
+ """
self.assertRaises(lib_exc.BadRequest,
self.admin_flavors_client.update_flavor_extra_spec,
self.flavor['id'],
- "key1",
- key1="value",
- key2="value")
+ 'hw:numa_nodes',
+ **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index c246685..31fe2b5 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -29,11 +29,13 @@
@decorators.idempotent_id('9bfaf98d-e2cb-44b0-a07e-2558b2821e4f')
def test_list_hosts(self):
+ # Listing hosts.
hosts = self.client.list_hosts()['hosts']
self.assertGreaterEqual(len(hosts), 2, str(hosts))
@decorators.idempotent_id('5dc06f5b-d887-47a2-bb2a-67762ef3c6de')
def test_list_hosts_with_zone(self):
+ # Listing hosts with specified availability zone
self.useFixture(fixtures.LockFixture('availability_zone'))
hosts = self.client.list_hosts()['hosts']
host = hosts[0]
@@ -43,6 +45,7 @@
@decorators.idempotent_id('9af3c171-fbf4-4150-a624-22109733c2a6')
def test_list_hosts_with_a_blank_zone(self):
+ # Listing hosts with blank availability zone.
# If send the request with a blank zone, the request will be successful
# and it will return all the hosts list
hosts = self.client.list_hosts(zone='')['hosts']
@@ -50,6 +53,7 @@
@decorators.idempotent_id('c6ddbadb-c94e-4500-b12f-8ffc43843ff8')
def test_list_hosts_with_nonexistent_zone(self):
+ # Listing hosts with a nonexistent availability zone.
# If send the request with a nonexistent zone, the request will be
# successful and no hosts will be returned
hosts = self.client.list_hosts(zone='xxx')['hosts']
@@ -57,6 +61,7 @@
@decorators.idempotent_id('38adbb12-aee2-4498-8aec-329c72423aa4')
def test_show_host_detail(self):
+ # Showing host details.
hosts = self.client.list_hosts()['hosts']
hosts = [host for host in hosts if host['service'] == 'compute']
diff --git a/tempest/api/compute/admin/test_hosts_negative.py b/tempest/api/compute/admin/test_hosts_negative.py
index 8a91ae2..e8733c8 100644
--- a/tempest/api/compute/admin/test_hosts_negative.py
+++ b/tempest/api/compute/admin/test_hosts_negative.py
@@ -39,18 +39,21 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
def test_list_hosts_with_non_admin_user(self):
+ # Non admin user is not allowed to list hosts.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_hosts)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
def test_show_host_detail_with_nonexistent_hostname(self):
+ # Showing host detail with a nonexistent hostname should fail.
self.assertRaises(lib_exc.NotFound,
self.client.show_host, 'nonexistent_hostname')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
def test_show_host_detail_with_non_admin_user(self):
+ # Non admin user is not allowed to show host details.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_host,
self.hostname)
@@ -58,6 +61,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
def test_update_host_with_non_admin_user(self):
+ # Non admin user is not allowed to update host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.update_host,
self.hostname,
@@ -67,7 +71,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
def test_update_host_with_invalid_status(self):
- # 'status' can only be 'enable' or 'disable'
+ # Updating host to invalid status should fail,
+ # 'status' can only be 'enable' or 'disable'.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
@@ -77,7 +82,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
def test_update_host_with_invalid_maintenance_mode(self):
- # 'maintenance_mode' can only be 'enable' or 'disable'
+ # Updating host to invalid maintenance mode should fail,
+ # 'maintenance_mode' can only be 'enable' or 'disable'.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname,
@@ -87,7 +93,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
def test_update_host_without_param(self):
- # 'status' or 'maintenance_mode' needed for host update
+ # Updating host without param should fail,
+ # 'status' or 'maintenance_mode' is needed for host update.
self.assertRaises(lib_exc.BadRequest,
self.client.update_host,
self.hostname)
@@ -95,6 +102,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
def test_update_nonexistent_host(self):
+ # Updating a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.update_host,
'nonexistent_hostname',
@@ -104,6 +112,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
def test_startup_nonexistent_host(self):
+ # Starting up a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.startup_host,
'nonexistent_hostname')
@@ -111,6 +120,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
def test_startup_host_with_non_admin_user(self):
+ # Non admin user is not allowed to startup host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.startup_host,
self.hostname)
@@ -118,6 +128,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
def test_shutdown_nonexistent_host(self):
+ # Shutting down a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.shutdown_host,
'nonexistent_hostname')
@@ -125,6 +136,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
def test_shutdown_host_with_non_admin_user(self):
+ # Non admin user is not allowed to shutdown host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.shutdown_host,
self.hostname)
@@ -132,6 +144,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
def test_reboot_nonexistent_host(self):
+ # Rebooting a nonexistent host should fail.
self.assertRaises(lib_exc.NotFound,
self.client.reboot_host,
'nonexistent_hostname')
@@ -139,6 +152,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
def test_reboot_host_with_non_admin_user(self):
+ # Non admin user is not allowed to reboot host.
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reboot_host,
self.hostname)
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 9822c26..e45aac5 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -134,6 +134,7 @@
@decorators.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
+ # Searching for a hypervisor by its name.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers, "No hypervisors found.")
hypers = self.client.search_hypervisor(
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index 0056376..723b93c 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -40,8 +40,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c136086a-0f67-4b2b-bc61-8482bd68989f')
def test_show_nonexistent_hypervisor(self):
+ # Showing a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
-
self.assertRaises(
lib_exc.NotFound,
self.client.show_hypervisor,
@@ -50,6 +50,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('51e663d0-6b89-4817-a465-20aca0667d03')
def test_show_hypervisor_with_non_admin_user(self):
+ # Non admin user is not allowed to show hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -61,6 +62,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e2b061bb-13f9-40d8-9d6e-d5bf17595849')
def test_get_hypervisor_stats_with_non_admin_user(self):
+ # Non admin user is not allowed to get hypervisor stats.
self.assertRaises(
lib_exc.Forbidden,
self.non_adm_client.show_hypervisor_statistics)
@@ -68,6 +70,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f60aa680-9a3a-4c7d-90e1-fae3a4891303')
def test_get_nonexistent_hypervisor_uptime(self):
+ # Getting uptime of a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
self.assertRaises(
@@ -78,6 +81,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6c3461f9-c04c-4e2a-bebb-71dc9cb47df2')
def test_get_hypervisor_uptime_with_non_admin_user(self):
+ # Non admin user is not allowed to get hypervisor uptime.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -97,7 +101,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('dc02db05-e801-4c5f-bc8e-d915290ab345')
def test_get_hypervisor_list_details_with_non_admin_user(self):
- # List of hypervisor details and available services with non admin user
+ # Non admin user is not allowed to list hypervisor details.
self.assertRaises(
lib_exc.Forbidden,
self.non_adm_client.list_hypervisors, detail=True)
@@ -109,6 +113,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('2a0a3938-832e-4859-95bf-1c57c236b924')
def test_show_servers_with_non_admin_user(self):
+ # Non admin user is not allowed to show servers on hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -120,6 +125,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02463d69-0ace-4d33-a4a8-93d7883a2bba')
def test_show_servers_with_nonexistent_hypervisor(self):
+ # Showing servers on a nonexistent hypervisor should fail.
nonexistent_hypervisor_id = data_utils.rand_uuid()
self.assertRaises(
@@ -130,6 +136,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5b6a6c79-5dc1-4fa5-9c58-9c8085948e74')
def test_search_hypervisor_with_non_admin_user(self):
+ # Non admin user is not allowed to search hypervisor.
hypers = self._list_hypervisors()
self.assertNotEmpty(hypers)
@@ -141,6 +148,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('19a45cc1-1000-4055-b6d2-28e8b2ec4faa')
def test_search_nonexistent_hypervisor(self):
+ # Searching for a nonexistent hypervisor should fail.
self.assertRaises(
lib_exc.NotFound,
self.client.search_hypervisor,
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 836b975..a845c72 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -30,6 +30,7 @@
class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
+ """Test live migration operations supported by admin user"""
# These tests don't attempt any SSH validation nor do they use
# floating IPs on the instance, so all we need is a network and
@@ -123,12 +124,14 @@
@decorators.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
def test_live_block_migration(self):
+ """Test live migrating an active server"""
self._test_live_migration()
@decorators.idempotent_id('1e107f21-61b2-4988-8f22-b196e938ab88')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
def test_live_block_migration_paused(self):
+ """Test live migrating a paused server"""
self._test_live_migration(state='PAUSED')
@testtools.skipUnless(CONF.compute_feature_enabled.
@@ -137,6 +140,7 @@
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@utils.services('volume')
def test_volume_backed_live_migration(self):
+ """Test live migrating an active server booted from volume"""
self._test_live_migration(volume_backed=True)
@decorators.idempotent_id('e19c0cc6-6720-4ed8-be83-b6603ed5c812')
@@ -148,6 +152,7 @@
'Block Live migration not configured for iSCSI')
@utils.services('volume')
def test_iscsi_volume(self):
+ """Test live migrating a server with volume attached"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
target_host = self.get_host_other_than(server_id)
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 83f2e61..37f5aec 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -25,6 +25,7 @@
class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+ """Test migration operations supported by admin user"""
@classmethod
def setup_clients(cls):
@@ -33,14 +34,14 @@
@decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
def test_list_migrations(self):
- # Admin can get the migrations list
+ """Test admin user can get the migrations list"""
self.client.list_migrations()
@decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_list_migrations_in_flavor_resize_situation(self):
- # Admin can get the migrations list which contains the resized server
+ """Admin can get the migrations list containing the resized server"""
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
@@ -62,8 +63,11 @@
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert_deleted_flavor(self):
- # Tests that we can revert the resize on an instance whose original
- # flavor has been deleted.
+ """Test reverting resized server with original flavor deleted
+
+ Tests that we can revert the resize on an instance whose original
+ flavor has been deleted.
+ """
# First we have to create a flavor that we can delete so make a copy
# of the normal flavor from which we'd create a server.
@@ -137,10 +141,12 @@
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_cold_migration(self):
+ """Test cold migrating server and then confirm the migration"""
self._test_cold_migrate_server(revert=False)
@decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration not available.')
def test_revert_cold_migration(self):
+ """Test cold migrating server and then revert the migration"""
self._test_cold_migrate_server(revert=True)
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index 33b23b5..fb6376e 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -35,6 +35,7 @@
@decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
def test_get_network(self):
+ """Test getting network from nova side"""
networks = self.client.list_networks()['networks']
if CONF.compute.fixed_network_name:
configured_network = [x for x in networks if x['label'] ==
@@ -56,6 +57,7 @@
@decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
def test_list_all_networks(self):
+ """Test getting all networks from nova side"""
networks = self.client.list_networks()['networks']
# Check the configured network is in the list
if CONF.compute.fixed_network_name:
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index bebc8c5..f440428 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -23,7 +23,7 @@
class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-
+ """Test creating servers on mutiple nodes with scheduler_hints."""
@classmethod
def resource_setup(cls):
super(ServersOnMultiNodesTest, cls).resource_setup()
@@ -65,6 +65,7 @@
compute.is_scheduler_filter_enabled("SameHostFilter"),
'SameHostFilter is not available.')
def test_create_servers_on_same_host(self):
+ """Test creating servers with hints 'same_host'"""
hints = {'same_host': self.server01}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
@@ -76,6 +77,7 @@
compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts(self):
+ """Test creating servers with hints of single 'different_host'"""
hints = {'different_host': self.server01}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
@@ -87,7 +89,7 @@
compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts_with_list_of_servers(self):
- # This scheduler-hint supports list of servers also.
+ """Test creating servers with hints of a list of 'different_host'"""
hints = {'different_host': [self.server01]}
server02 = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 73e191b..bf846e5 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -28,11 +28,13 @@
@decorators.idempotent_id('5be41ef4-53d1-41cc-8839-5c2a48a1b283')
def test_list_services(self):
+ # Listing nova services
services = self.client.list_services()['services']
self.assertNotEmpty(services)
@decorators.idempotent_id('f345b1ec-bc6e-4c38-a527-3ca2bc00bef5')
def test_get_service_by_service_binary_name(self):
+ # Listing nova services by binary name.
binary_name = 'nova-compute'
services = self.client.list_services(binary=binary_name)['services']
self.assertNotEmpty(services)
@@ -41,6 +43,7 @@
@decorators.idempotent_id('affb42d5-5b4b-43c8-8b0b-6dca054abcca')
def test_get_service_by_host_name(self):
+ # Listing nova services by host name.
services = self.client.list_services()['services']
host_name = services[0]['host']
services_on_host = [service for service in services if
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index d264829..033caa8 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -31,14 +31,18 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1126d1f8-266e-485f-a687-adc547492646')
def test_list_services_with_non_admin_user(self):
+ """Non admin user is not allowed to list nova services"""
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.list_services)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d0884a69-f693-4e79-a9af-232d15643bf7')
def test_get_service_by_invalid_params(self):
- # Expect all services to be returned when the request contains invalid
- # parameters.
+ """Test listing services by invalid filter should return all services
+
+ Expect all services to be returned when the request contains invalid
+ parameters.
+ """
services = self.client.list_services()['services']
services_xxx = (self.client.list_services(xxx='nova-compute')
['services'])
@@ -47,6 +51,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1e966d4a-226e-47c7-b601-0b18a27add54')
def test_get_service_by_invalid_service_and_valid_host(self):
+ """Test listing services by invalid service and valid host value"""
services = self.client.list_services()['services']
host_name = services[0]['host']
services = self.client.list_services(host=host_name,
@@ -56,6 +61,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('64e7e7fb-69e8-4cb6-a71d-8d5eb0c98655')
def test_get_service_with_valid_service_and_invalid_host(self):
+ """Test listing services by valid service and invalid host value"""
services = self.client.list_services()['services']
binary_name = services[0]['binary']
services = self.client.list_services(host='xxx',
@@ -79,6 +85,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('508671aa-c929-4479-bd10-8680d40dd0a6')
def test_enable_service_with_invalid_service_id(self):
+ """Test updating non existing service to status enabled"""
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
service_id=self.fake_service_id,
@@ -87,6 +94,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a9eeeade-42b3-419f-87aa-c9342aa068cf')
def test_disable_service_with_invalid_service_id(self):
+ """Test updating non existing service to status disabled"""
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
service_id=self.fake_service_id,
@@ -95,6 +103,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f46a9d91-1e85-4b96-8e7a-db7706fa2e9a')
def test_disable_log_reason_with_invalid_service_id(self):
+ """Test updating non existing service to disabled with reason"""
+
# disabled_reason requires that status='disabled' be provided.
self.assertRaises(lib_exc.NotFound,
self.client.update_service,
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 371b506..edcb1a7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -23,6 +23,7 @@
class TestVolumeSwapBase(base.BaseV2ComputeAdminTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 4a7f36f..7b0f48b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -13,6 +13,7 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -22,6 +23,7 @@
class VolumesAdminNegativeTest(base.BaseV2ComputeAdminTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -57,3 +59,66 @@
self.admin_servers_client.update_attached_volume,
self.server['id'], volume['id'],
volumeId=nonexistent_volume)
+
+
+class UpdateMultiattachVolumeNegativeTest(base.BaseV2ComputeAdminTest):
+
+ min_microversion = '2.60'
+ volume_min_microversion = '3.27'
+
+ @classmethod
+ def skip_checks(cls):
+ super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.volume_multiattach:
+ raise cls.skipException('Volume multi-attach is not available.')
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
+ @utils.services('volume')
+ def test_multiattach_rw_volume_update_failure(self):
+
+ # Create two multiattach capable volumes.
+ vol1 = self.create_volume(multiattach=True)
+ vol2 = self.create_volume(multiattach=True)
+
+ # Create two instances.
+ server1 = self.create_test_server(wait_until='ACTIVE')
+ server2 = self.create_test_server(wait_until='ACTIVE')
+
+ # Attach vol1 to both of these instances.
+ vol1_attachment1 = self.attach_volume(server1, vol1)
+ vol1_attachment2 = self.attach_volume(server2, vol1)
+
+ # Assert that we now have two attachments.
+ vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+ self.assertEqual(2, len(vol1['attachments']))
+
+ # By default both of these attachments should have an attach_mode of
+ # read-write, assert that here to ensure the following calls to update
+ # the volume will be rejected.
+ for volume_attachment in vol1['attachments']:
+ attachment_id = volume_attachment['attachment_id']
+ attachment = self.attachments_client.show_attachment(
+ attachment_id)['attachment']
+ self.assertEqual('rw', attachment['attach_mode'])
+
+ # Assert that a BadRequest is raised when we attempt to update volume1
+ # to volume2 on server1 or server2.
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_servers_client.update_attached_volume,
+ server1['id'], vol1['id'], volumeId=vol2['id'])
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_servers_client.update_attached_volume,
+ server2['id'], vol1['id'], volumeId=vol2['id'])
+
+ # Fetch the volume 1 to check the current attachments.
+ vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+ vol1_attachment_ids = [a['id'] for a in vol1['attachments']]
+
+ # Assert that volume 1 is still attached to both server 1 and 2.
+ self.assertIn(vol1_attachment1['id'], vol1_attachment_ids)
+ self.assertIn(vol1_attachment2['id'], vol1_attachment_ids)
+
+ # Assert that volume 2 has no attachments.
+ vol2 = self.volumes_client.show_volume(vol2['id'])['volume']
+ self.assertEqual([], vol2['attachments'])
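# Illustrative sketch (not part of this patch): the attach-mode check used
# above, factored into a helper on a hypothetical test class. It assumes
# volumes_client and attachments_client are set up the way
# BaseV2ComputeTest does after this change.
from tempest.api.compute import base

class ExampleMultiattachCheck(base.BaseV2ComputeAdminTest):

    def _assert_all_attachments_rw(self, volume_id):
        # Every attachment of a freshly multi-attached volume defaults to
        # read-write, which is why the later update calls are rejected.
        volume = self.volumes_client.show_volume(volume_id)['volume']
        for volume_attachment in volume['attachments']:
            attachment = self.attachments_client.show_attachment(
                volume_attachment['attachment_id'])['attachment']
            self.assertEqual('rw', attachment['attach_mode'])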
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 7c2d9d2..74570ce 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -52,12 +52,15 @@
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
- cfg_min_version = CONF.compute.min_microversion
- cfg_max_version = CONF.compute.max_microversion
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- cfg_min_version,
- cfg_max_version)
+ api_version_utils.check_skip_with_microversion(
+ cls.min_microversion, cls.max_microversion,
+ CONF.compute.min_microversion, CONF.compute.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.volume_min_microversion, cls.volume_max_microversion,
+ CONF.volume.min_microversion, CONF.volume.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.placement_min_microversion, cls.placement_max_microversion,
+ CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
@@ -105,6 +108,8 @@
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
+ cls.attachments_client = cls.os_primary.attachments_client_latest
+ cls.snapshots_client = cls.os_primary.snapshots_client_latest
if CONF.service_available.glance:
if CONF.image_feature_enabled.api_v1:
cls.images_client = cls.os_primary.image_client
@@ -151,6 +156,14 @@
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
+ cls.volume_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.volume_min_microversion,
+ CONF.volume.min_microversion))
+ cls.placement_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.placement_min_microversion,
+ CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
@@ -215,7 +228,7 @@
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
- validation_resources=None, **kwargs):
+ validation_resources=None, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
@@ -227,6 +240,7 @@
:param volume_backed: Whether the instance is volume backed or not.
:param validation_resources: Dictionary of validation resources as
returned by `get_class_validation_resources`.
+ :param clients: Client manager, defaults to os_primary.
:param kwargs: Extra arguments are passed down to the
`compute.create_test_server` call.
"""
@@ -243,8 +257,11 @@
not tenant_network):
kwargs['networks'] = 'none'
+ if clients is None:
+ clients = cls.os_primary
+
body, servers = compute.create_test_server(
- cls.os_primary,
+ clients,
validatable,
validation_resources=validation_resources,
tenant_network=tenant_network,
@@ -255,11 +272,11 @@
# and then wait for all
for server in servers:
cls.addClassResourceCleanup(waiters.wait_for_server_termination,
- cls.servers_client, server['id'])
+ clients.servers_client, server['id'])
for server in servers:
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
- cls.servers_client.delete_server, server['id'])
+ clients.servers_client.delete_server, server['id'])
return body
@@ -476,7 +493,9 @@
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- compute_microversion=self.request_microversion))
+ compute_microversion=self.request_microversion,
+ volume_microversion=self.volume_request_microversion,
+ placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
@@ -542,11 +561,17 @@
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
- # On teardown detach the volume and wait for it to be available. This
- # is so we don't error out when trying to delete the volume during
- # teardown.
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.volumes_client, volume['id'], 'available')
+ # On teardown detach the volume and for multiattach volumes wait for
+ # the attachment to be removed. For non-multiattach volumes wait for
+ # the state of the volume to change to available. This is so we don't
+ # error out when trying to delete the volume during teardown.
+ if volume['multiattach']:
+ self.addCleanup(waiters.wait_for_volume_attachment_remove,
+ self.volumes_client, volume['id'],
+ attachment['id'])
+ else:
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume['id'], 'available')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
@@ -554,6 +579,25 @@
volume['id'], 'in-use')
return attachment
+ def create_volume_snapshot(self, volume_id, name=None, description=None,
+ metadata=None, force=False):
+ name = name or data_utils.rand_name(
+ self.__class__.__name__ + '-snapshot')
+ snapshot = self.snapshots_client.create_snapshot(
+ volume_id=volume_id,
+ force=force,
+ display_name=name,
+ description=description,
+ metadata=metadata)['snapshot']
+ self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
+ snapshot['id'])
+ self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
+ snapshot = self.snapshots_client.show_snapshot(
+ snapshot['id'])['snapshot']
+ return snapshot
+
def assert_flavor_equal(self, flavor_id, server_flavor):
"""Check whether server_flavor equals to flavor.
@@ -612,8 +656,14 @@
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
- hosts = [svc['host'] for svc in svcs
- if svc['state'] == 'up' and svc['status'] == 'enabled']
+ hosts = []
+ for svc in svcs:
+ if svc['state'] == 'up' and svc['status'] == 'enabled':
+ if CONF.compute.compute_volume_common_az:
+ if svc['zone'] == CONF.compute.compute_volume_common_az:
+ hosts.append(svc['host'])
+ else:
+ hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
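# Illustrative sketch (not part of this patch): how a test class opts into
# the volume and placement microversion checks added to skip_checks()
# above. The class name is hypothetical; the attribute values mirror those
# used by UpdateMultiattachVolumeNegativeTest earlier in this change.
from tempest.api.compute import base

class ExampleMicroversionTest(base.BaseV2ComputeTest):
    min_microversion = '2.60'         # compute API lower bound
    volume_min_microversion = '3.27'  # block storage API lower bound
    # Attributes left at their defaults add no extra skip condition, so
    # this class is not constrained by the placement microversion range.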
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 7cf26fb..ef33685 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -12,17 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-import testtools
+from tempest.lib import exceptions as lib_exceptions
CONF = config.CONF
class ImagesTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -51,12 +54,23 @@
# in task_state image_snapshot
self.addCleanup(waiters.wait_for_server_status, self.servers_client,
server['id'], 'ACTIVE')
- image = self.create_image_from_server(server['id'],
- wait_until='SAVING')
- self.client.delete_image(image['id'])
- msg = ('The image with ID {image_id} failed to be deleted'
- .format(image_id=image['id']))
- self.assertTrue(self.client.is_resource_deleted(image['id']), msg)
+ snapshot_name = data_utils.rand_name('test-snap')
+ try:
+ image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='SAVING')
+ self.client.delete_image(image['id'])
+ msg = ('The image with ID {image_id} failed to be deleted'
+ .format(image_id=image['id']))
+ self.assertTrue(self.client.is_resource_deleted(image['id']),
+ msg)
+ self.assertEqual(snapshot_name, image['name'])
+ except lib_exceptions.TimeoutException as ex:
+ # If the timeout is reached, there is no need to check the state:
+ # the image is at least no longer in 'SAVING', and this test case
+ # does not cover any other state transition.
+ # Hence, skip the test.
+ raise self.skipException("This test is skipped because " + str(ex))
@decorators.idempotent_id('aaacd1d0-55a2-4ce8-818a-b5439df8adc9')
def test_create_image_from_stopped_server(self):
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 512c9d2..37f9be3 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -30,6 +30,7 @@
class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
def tearDown(self):
"""Terminate test instances created after a test is executed."""
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index df8da07..c1af6c7 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -86,12 +86,16 @@
# apparently not enough? Add cleanup here.
self.addCleanup(self.delete_server, server['id'])
self._wait_for_validation(server, validation_resources)
+ try:
+ fip = set([validation_resources['floating_ip']['ip']])
+ except KeyError:
+ fip = ()
ifs = (self.interfaces_client.list_interfaces(server['id'])
['interfaceAttachments'])
body = waiters.wait_for_interface_status(
self.interfaces_client, server['id'], ifs[0]['port_id'], 'ACTIVE')
ifs[0]['port_state'] = body['port_state']
- return server, ifs
+ return server, ifs, fip
class AttachInterfacesTestJSON(AttachInterfacesTestBase):
@@ -226,7 +230,7 @@
@decorators.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
@utils.services('network')
def test_create_list_show_delete_interfaces_by_network_port(self):
- server, ifs = self._create_server_get_interfaces()
+ server, ifs, _ = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
@@ -268,7 +272,7 @@
raise self.skipException("Only owner network supports "
"creating interface by fixed ip.")
- server, ifs = self._create_server_get_interfaces()
+ server, ifs, _ = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
@@ -354,9 +358,8 @@
not CONF.network.shared_physical_network):
raise self.skipException("Only owner network supports "
"creating interface by fixed ip.")
-
# Add and Remove the fixed IP to server.
- server, ifs = self._create_server_get_interfaces()
+ server, ifs, fip = self._create_server_get_interfaces()
original_interface_count = len(ifs) # This is the number of ports.
self.assertGreater(original_interface_count, 0)
# Get the starting list of IPs on the server.
@@ -369,6 +372,9 @@
self.assertEqual(1, len(addresses), addresses) # number of networks
# Keep track of the original addresses so we can know which IP is new.
original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
+ # Make sure the floating IP possibly assigned during
+ # server creation is always present in the set of original ips.
+ original_ips = set(original_ips).union(fip)
original_ip_count = len(original_ips)
self.assertGreater(original_ip_count, 0, addresses) # at least 1
network_id = ifs[0]['net_id']
@@ -376,40 +382,22 @@
# fixed IP on the same network (and same port since we only have one
# port).
self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
- # Wait for the ips count to increase by one.
- def _get_server_floating_ips():
- _floating_ips = []
- _server = self.os_primary.servers_client.show_server(
- server['id'])['server']
- for _ip_set in _server['addresses']:
- for _ip in _server['addresses'][_ip_set]:
- if _ip['OS-EXT-IPS:type'] == 'floating':
- _floating_ips.append(_ip['addr'])
- return _floating_ips
-
- def _wait_for_ip_increase():
+ def _wait_for_ip_change(expected_count):
_addresses = self.os_primary.servers_client.list_addresses(
server['id'])['addresses']
- _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
- LOG.debug("Wait for IP increase. All IPs still associated to "
+ _ips = set([addr['addr'] for addr in list(_addresses.values())[0]])
+ # Make sure possible floating ip is always present in the set.
+ _ips = _ips.union(fip)
+ LOG.debug("Wait for change of IPs. All IPs still associated to "
"the server %(id)s: %(ips)s",
{'id': server['id'], 'ips': _ips})
- if len(_ips) == original_ip_count + 1:
- return True
- elif len(_ips) == original_ip_count:
- return False
- # If not, lets remove any floating IP from the list and check again
- _fips = _get_server_floating_ips()
- _ips = [_ip for _ip in _ips if _ip not in _fips]
- LOG.debug("Wait for IP increase. Fixed IPs still associated to "
- "the server %(id)s: %(ips)s",
- {'id': server['id'], 'ips': _ips})
- return len(_ips) == original_ip_count + 1
+ return len(_ips) == expected_count
+ # Wait for the ips count to increase by one.
if not test_utils.call_until_true(
- _wait_for_ip_increase, CONF.compute.build_timeout,
- CONF.compute.build_interval):
+ _wait_for_ip_change, CONF.compute.build_timeout,
+ CONF.compute.build_interval, original_ip_count + 1):
raise lib_exc.TimeoutException(
'Timed out while waiting for IP count to increase.')
@@ -428,26 +416,8 @@
break
self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
# Wait for the interface count to decrease by one.
-
- def _wait_for_ip_decrease():
- _addresses = self.os_primary.servers_client.list_addresses(
- server['id'])['addresses']
- _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
- LOG.debug("Wait for IP decrease. All IPs still associated to "
- "the server %(id)s: %(ips)s",
- {'id': server['id'], 'ips': _ips})
- if len(_ips) == original_ip_count:
- return True
- # If not, lets remove any floating IP from the list and check again
- _fips = _get_server_floating_ips()
- _ips = [_ip for _ip in _ips if _ip not in _fips]
- LOG.debug("Wait for IP decrease. Fixed IPs still associated to "
- "the server %(id)s: %(ips)s",
- {'id': server['id'], 'ips': _ips})
- return len(_ips) == original_ip_count
-
if not test_utils.call_until_true(
- _wait_for_ip_decrease, CONF.compute.build_timeout,
- CONF.compute.build_interval):
+ _wait_for_ip_change, CONF.compute.build_timeout,
+ CONF.compute.build_interval, original_ip_count):
raise lib_exc.TimeoutException(
'Timed out while waiting for IP count to decrease.')
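# Illustrative sketch (not part of this patch): the polling pattern the
# refactor above relies on. test_utils.call_until_true() forwards extra
# positional arguments to the predicate, so a single predicate can serve
# both the "IP added" and "IP removed" waits. Names below are hypothetical.
from tempest.lib.common.utils import test_utils

def _server_has_ip_count(servers_client, server_id, expected_count):
    # Collect every address currently reported for the server.
    addresses = servers_client.list_addresses(server_id)['addresses']
    ips = {addr['addr'] for addrs in addresses.values() for addr in addrs}
    return len(ips) == expected_count

# Example wait for one extra IP, mirroring the test above:
# test_utils.call_until_true(_server_has_ip_count, timeout, interval,
#                            servers_client, server_id, original_count + 1)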
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 8aab574..8879369 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from json import decoder as json_decoder
+
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
@@ -110,7 +112,11 @@
max_microversion = '2.32'
def verify_device_metadata(self, md_json):
- md_dict = json.loads(md_json)
+ try:
+ md_dict = json.loads(md_json)
+ except (json_decoder.JSONDecodeError, TypeError):
+ return False
+
for d in md_dict['devices']:
if d['type'] == 'nic':
if d['mac'] == self.port1['mac_address']:
@@ -310,7 +316,11 @@
raise cls.skipException('Metadata API must be enabled')
def verify_device_metadata(self, md_json):
- md_dict = json.loads(md_json)
+ try:
+ md_dict = json.loads(md_json)
+ except (json_decoder.JSONDecodeError, TypeError):
+ return False
+
found_devices = [d['tags'][0] for d in md_dict['devices']
if d.get('tags')]
try:
@@ -358,7 +368,8 @@
validation_resources=validation_resources,
config_drive=config_drive_enabled,
name=data_utils.rand_name('device-tagging-server'),
- networks=[{'uuid': self.get_tenant_network()['id']}])
+ networks=[{'uuid': self.get_tenant_network()['id']}],
+ wait_until='ACTIVE')
self.addCleanup(self.delete_server, server['id'])
# NOTE(mgoddard): Get detailed server to ensure addresses are present
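# Illustrative sketch (not part of this patch): the defensive parse added
# to verify_device_metadata() above. Metadata read from the guest may not
# be valid JSON yet, so the helper reports "not ready" instead of raising,
# letting the surrounding wait loop retry. The helper name is hypothetical.
from json import decoder as json_decoder

from oslo_serialization import jsonutils as json

def _try_parse_metadata(md_json):
    try:
        return json.loads(md_json)
    except (json_decoder.JSONDecodeError, TypeError):
        # Incomplete or empty payload: signal the caller to retry later.
        return None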
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index e176251..dcadace 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -23,6 +23,7 @@
@decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
def test_multiple_create(self):
+ # When creating a server with min_count=2, 2 servers will be created.
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
self.os_primary,
@@ -39,6 +40,8 @@
@decorators.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
def test_multiple_create_with_reservation_return(self):
+ # When creating multiple servers with return_reservation_id=True,
+ # a reservation_id will be returned.
body = self.create_test_server(wait_until='ACTIVE',
min_count=1,
max_count=2,
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 422510f..6bdf83b 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -23,6 +23,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
def test_min_count_less_than_one(self):
+ # Creating server with min_count=0 should fail.
invalid_min_count = 0
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
min_count=invalid_min_count)
@@ -30,6 +31,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
def test_min_count_non_integer(self):
+ # Creating server with non-integer min_count should fail.
invalid_min_count = 2.5
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
min_count=invalid_min_count)
@@ -37,6 +39,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
def test_max_count_less_than_one(self):
+ # Creating server with max_count < 1 should fail.
invalid_max_count = 0
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
max_count=invalid_max_count)
@@ -44,6 +47,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
def test_max_count_non_integer(self):
+ # Creating server with non-integer max_count should fail.
invalid_max_count = 2.5
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
max_count=invalid_max_count)
@@ -51,6 +55,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
def test_max_count_less_than_min_count(self):
+ # Creating server with max_count less than min_count should fail.
min_count = 3
max_count = 2
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index ff50836..d477be0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -343,20 +343,27 @@
def test_resize_volume_backed_server_confirm(self):
# We have to create a new server that is volume-backed since the one
# from setUp is not volume-backed.
- server = self.create_test_server(
- volume_backed=True, wait_until='ACTIVE')
+ kwargs = {'volume_backed': True,
+ 'wait_until': 'ACTIVE'}
+ if CONF.validation.run_validation:
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ kwargs.update({'validatable': True,
+ 'validation_resources': validation_resources})
+ server = self.create_test_server(**kwargs)
+
# NOTE(mgoddard): Get detailed server to ensure addresses are present
# in fixed IP case.
server = self.servers_client.show_server(server['id'])['server']
+
self._test_resize_server_confirm(server['id'])
+
if CONF.compute_feature_enabled.console_output:
# Now do something interactive with the guest like get its console
# output; we don't actually care about the output,
# just that it doesn't raise an error.
self.client.get_console_output(server['id'])
if CONF.validation.run_validation:
- validation_resources = self.get_class_validation_resources(
- self.os_primary)
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
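# Illustrative sketch (not part of this patch): the validatable-server
# pattern used above, assumed to run inside a BaseV2ComputeTest method.
# Validation resources are requested per test and the same dict is later
# handed to get_server_ip() for the SSH check.
kwargs = {'volume_backed': True, 'wait_until': 'ACTIVE'}
if CONF.validation.run_validation:
    validation_resources = self.get_test_validation_resources(
        self.os_primary)
    kwargs.update({'validatable': True,
                   'validation_resources': validation_resources})
server = self.create_test_server(**kwargs)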
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 482ba09..5688af1 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -20,6 +20,7 @@
class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 6629794..3fa859e 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -25,6 +25,7 @@
class ServerRescueTestBase(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -103,3 +104,132 @@
# Delete Security group
self.servers_client.remove_security_group(self.rescued_server_id,
name=sg['name'])
+
+
+class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
+ create_default_network = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseServerStableDeviceRescueTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
+ if not CONF.compute_feature_enabled.stable_rescue:
+ msg = "Stable rescue not available."
+ raise cls.skipException(msg)
+
+ def _create_server_and_rescue_image(self, hw_rescue_device=None,
+ hw_rescue_bus=None,
+ block_device_mapping_v2=None):
+ if block_device_mapping_v2:
+ server_id = self.create_test_server(
+ wait_until='ACTIVE',
+ block_device_mapping_v2=block_device_mapping_v2)['id']
+ else:
+ server_id = self.create_test_server(wait_until='ACTIVE')['id']
+
+ image_id = self.create_image_from_server(server_id,
+ wait_until='ACTIVE')['id']
+ if hw_rescue_bus:
+ self.images_client.update_image(
+ image_id, [dict(add='/hw_rescue_bus',
+ value=hw_rescue_bus)])
+ if hw_rescue_device:
+ self.images_client.update_image(
+ image_id, [dict(add='/hw_rescue_device',
+ value=hw_rescue_device)])
+ return server_id, image_id
+
+ def _test_stable_device_rescue(self, server_id, rescue_image_id):
+ self.servers_client.rescue_server(
+ server_id, rescue_image_ref=rescue_image_id)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'RESCUE')
+ self.servers_client.unrescue_server(server_id)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'ACTIVE')
+
+
+class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
+
+ @decorators.idempotent_id('947004c3-e8ef-47d9-9f00-97b74f9eaf96')
+ def test_stable_device_rescue_cdrom_ide(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='cdrom', hw_rescue_bus='ide')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
+ def test_stable_device_rescue_disk_virtio(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
+ def test_stable_device_rescue_disk_scsi(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='scsi')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
+ def test_stable_device_rescue_disk_usb(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='usb')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+ def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio')
+ server = self.servers_client.show_server(server_id)['server']
+ volume = self.create_volume()
+ self.attach_volume(server, volume)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+
+class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
+
+ min_microversion = '2.87'
+
+ @decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
+ def test_stable_device_rescue_bfv_blank_volume(self):
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "blank",
+ "volume_size": "1",
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
+ def test_stable_device_rescue_bfv_image_volume(self):
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "image",
+ "volume_size": "1",
+ "uuid": CONF.compute.image_ref,
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
+
+ @decorators.idempotent_id('7fcc5d2c-130e-4750-95f5-7343f9d0a2f3')
+ def test_stable_device_rescue_bfv_snapshot_volume(self):
+ volume_id = self.create_volume()['id']
+ self.volumes_client.set_bootable_volume(volume_id, bootable=True)
+ snapshot_id = self.create_volume_snapshot(volume_id)['id']
+ block_device_mapping_v2 = [{
+ "boot_index": "0",
+ "source_type": "snapshot",
+ "volume_size": "1",
+ "uuid": snapshot_id,
+ "destination_type": "volume"}]
+ server_id, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio',
+ block_device_mapping_v2=block_device_mapping_v2)
+ self._test_stable_device_rescue(server_id, rescue_image_id)
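# Illustrative sketch (not part of this patch): how the rescue image is
# tagged for stable device rescue. Glance v2 update_image() takes a
# JSON-patch style list, and these two properties are exactly what
# _create_server_and_rescue_image() sets above.
rescue_props = [
    {'add': '/hw_rescue_device', 'value': 'disk'},
    {'add': '/hw_rescue_bus', 'value': 'virtio'},
]
# self.images_client.update_image(image_id, rescue_props)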
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index 947706e..236ce7c 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -19,6 +19,7 @@
class EndPointsTestJSON(base.BaseIdentityV2AdminTest):
+ """Test keystone v2 endpoints"""
@classmethod
def resource_setup(cls):
@@ -51,6 +52,7 @@
@decorators.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
def test_list_endpoints(self):
+ """Test listing keystone endpoints"""
# Get a list of endpoints
fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
# Asserting LIST endpoints
@@ -62,6 +64,7 @@
@decorators.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1')
def test_create_list_delete_endpoint(self):
+ """Test creating, listing and deleting a keystone endpoint"""
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = self.endpoints_client.create_endpoint(
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 73fddb7..7c3a6cc 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -22,6 +22,7 @@
class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
+ """Test creating a token without project will default to user's project"""
@classmethod
def setup_credentials(cls):
@@ -35,11 +36,11 @@
self.domains_client.delete_domain(domain_id)
@testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
- 'Skipped because environment has an '
- 'immutable user source and solely '
- 'provides read-only access to users.')
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
@decorators.idempotent_id('d6110661-6a71-49a7-a453-b5e26640ff6d')
def test_default_project_id(self):
+ """Creating a token without project will default to user's project"""
# create a domain
dom_name = data_utils.rand_name('dom')
domain_body = self.domains_client.create_domain(
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index df0d79d..2dd1fe2 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -114,6 +114,13 @@
self.groups_client.add_group_user(group['id'], user['id'])
# list groups which user belongs to
user_groups = self.users_client.list_user_groups(user['id'])['groups']
+ # The `membership_expires_at` attribute is present when listing user
+ # group memberships, and is not an attribute of the groups themselves.
+ # Therefore we remove it from the comparison.
+ for g in user_groups:
+ if 'membership_expires_at' in g:
+ self.assertIsNone(g['membership_expires_at'])
+ del g['membership_expires_at']
self.assertEqual(sorted(groups, key=lambda k: k['name']),
sorted(user_groups, key=lambda k: k['name']))
self.assertEqual(2, len(user_groups))
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 5aec931..7bd0bcf 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -22,6 +22,7 @@
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ """Test listing keystone users"""
def _list_users_with_params(self, params, key, expected, not_expected):
# Helper method to list users filtered with params and
@@ -69,7 +70,7 @@
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
- # List users with domain
+ """List users with domain"""
params = {'domain_id': self.domain['id']}
self._list_users_with_params(params, 'domain_id',
self.domain_enabled_user,
@@ -77,7 +78,7 @@
@decorators.idempotent_id('bff8bf2f-9408-4ef5-b63a-753c8c2124eb')
def test_list_users_with_not_enabled(self):
- # List the users with not enabled
+ """List the users with not enabled"""
params = {'enabled': False}
self._list_users_with_params(params, 'enabled',
self.non_domain_enabled_user,
@@ -85,7 +86,7 @@
@decorators.idempotent_id('c285bb37-7325-4c02-bff3-3da5d946d683')
def test_list_users_with_name(self):
- # List users with name
+ """List users with name"""
params = {'name': self.domain_enabled_user['name']}
# When domain specific drivers are enabled the operations
# of listing all users and listing all groups are not supported,
@@ -98,7 +99,7 @@
@decorators.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
def test_list_users(self):
- # List users
+ """List users"""
# When domain specific drivers are enabled the operations
# of listing all users and listing all groups are not supported,
# they need a domain filter to be specified
@@ -120,7 +121,7 @@
@decorators.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
def test_get_user(self):
- # Get a user detail
+ """Get a user detail"""
user = self.users_client.show_user(self.users[0]['id'])['user']
self.assertEqual(self.users[0]['id'], user['id'])
self.assertEqual(self.users[0]['name'], user['name'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 2908fc4..fb81d0a 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -19,13 +19,14 @@
class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone policies"""
def _delete_policy(self, policy_id):
self.policies_client.delete_policy(policy_id)
@decorators.idempotent_id('1a0ad286-2d06-4123-ab0d-728893a76201')
def test_list_policies(self):
- # Test to list policies
+ """Test to list keystone policies"""
policy_ids = list()
fetched_ids = list()
for _ in range(3):
@@ -46,7 +47,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('e544703a-2f03-4cf2-9b0f-350782fdb0d3')
def test_create_update_delete_policy(self):
- # Test to update policy
+ """Test to update keystone policy"""
blob = data_utils.rand_name('BlobName')
policy_type = data_utils.rand_name('PolicyType')
policy = self.policies_client.create_policy(blob=blob,
diff --git a/tempest/api/identity/admin/v3/test_project_tags.py b/tempest/api/identity/admin/v3/test_project_tags.py
index b7878a8..eed60af 100644
--- a/tempest/api/identity/admin/v3/test_project_tags.py
+++ b/tempest/api/identity/admin/v3/test_project_tags.py
@@ -25,6 +25,8 @@
class IdentityV3ProjectTagsTest(base.BaseIdentityV3AdminTest):
+ """Test keystone project tags"""
+
# NOTE: force_tenant_isolation is true in the base class by default but
# overridden to false here to allow test execution for clouds using the
# pre-provisioned credentials provider.
@@ -34,6 +36,7 @@
@testtools.skipUnless(CONF.identity_feature_enabled.project_tags,
'Project tags not available.')
def test_list_update_delete_project_tags(self):
+ """Test listing, updating and deleting of project tags"""
project = self.setup_test_project()
# Create a tag for testing.
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index 5afeb98..a649d27 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -20,6 +20,7 @@
class ServicesTestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone services"""
def _del_service(self, service_id):
# Used for deleting the services created in this class
@@ -31,6 +32,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('5193aad5-bcb7-411d-85b0-b3b61b96ef06')
def test_create_update_get_service(self):
+ """Test creating, updating and getting of keystone service"""
# Creating a Service
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
@@ -63,7 +65,7 @@
@decorators.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
def test_create_service_without_description(self):
- # Create a service only with name and type
+ """Create a keystone service only with name and type"""
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
service = self.services_client.create_service(
@@ -74,7 +76,7 @@
@decorators.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
def test_list_services(self):
- # Create, List, Verify and Delete Services
+ """Create, List, Verify and Delete Keystone Services"""
service_ids = list()
service_types = list()
for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 8955a93..31cbbac 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -27,6 +27,7 @@
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ """Test keystone users"""
@classmethod
def skip_checks(cls):
@@ -38,7 +39,7 @@
@decorators.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
def test_user_update(self):
- # Test case to check if updating of user attributes is successful.
+ """Test case to check if updating of user attributes is successful"""
# Creating first user
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
@@ -72,6 +73,7 @@
@decorators.idempotent_id('2d223a0e-e457-4a70-9fb1-febe027a0ff9')
def test_update_user_password(self):
+ """Test updating user password"""
# Creating a user to check the password update
u_name = data_utils.rand_name('user')
original_password = data_utils.rand_password()
@@ -98,7 +100,7 @@
@decorators.idempotent_id('a831e70c-e35b-430b-92ed-81ebbc5437b8')
def test_list_user_projects(self):
- # List the projects that a user has access upon
+ """Test listing the projects that a user has access upon"""
assigned_project_ids = list()
fetched_project_ids = list()
u_project = self.setup_test_project()
@@ -141,7 +143,7 @@
@decorators.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
def test_get_user(self):
- # Get a user detail
+ """Test getting a user detail"""
user = self.setup_test_user()
fetched_user = self.users_client.show_user(user['id'])['user']
self.assertEqual(user['id'], fetched_user['id'])
@@ -150,6 +152,7 @@
'Security compliance not available.')
@decorators.idempotent_id('568cd46c-ee6c-4ab4-a33a-d3791931979e')
def test_password_history_not_enforced_in_admin_reset(self):
+ """Test setting same password when password history is not enforced"""
old_password = self.os_primary.credentials.password
user_id = self.os_primary.credentials.user_id
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
index bc95f0d..ce6adf9 100644
--- a/tempest/api/identity/v3/test_catalog.py
+++ b/tempest/api/identity/v3/test_catalog.py
@@ -19,9 +19,11 @@
class IdentityCatalogTest(base.BaseIdentityV3Test):
+ """Test service's catalog type values"""
@decorators.idempotent_id('56b57ced-22b8-4127-9b8a-565dfb0207e2')
def test_catalog_standardization(self):
+ """Test that every service has a standard catalog type value"""
# https://opendev.org/openstack/service-types-authority
# /src/branch/master/service-types.yaml
standard_service_values = [{'name': 'keystone', 'type': 'identity'},
@@ -31,11 +33,9 @@
# next, we need to GET the catalog using the catalog client
catalog = self.non_admin_catalog_client.show_catalog()['catalog']
# get list of the service types present in the catalog
- catalog_services = []
- for service in catalog:
- catalog_services.append(service['type'])
+ catalog_services = [service['type'] for service in catalog]
for service in standard_service_values:
- # if service enabled, check if it has a standard typevalue
+ # if service enabled, check if it has a standard type value
if service['name'] == 'keystone' or\
getattr(CONF.service_available, service['name']):
self.assertIn(service['type'], catalog_services)
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index dbb8c58..7e13d7f 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -19,10 +19,12 @@
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
+ """"Test image operations about image owner"""
@decorators.related_bug('1420008')
@decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
def test_create_image_owner_param(self):
+ """Test creating image with specified owner"""
# NOTE: Create image with owner different from tenant owner by
# using "owner" parameter requires an admin privileges.
random_id = data_utils.rand_uuid_hex()
@@ -35,6 +37,7 @@
@decorators.related_bug('1420008')
@decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
def test_update_image_owner_param(self):
+ """Test updating image owner"""
random_id_1 = data_utils.rand_uuid_hex()
image = self.admin_client.create_image(
container_format='bare', disk_format='raw', owner=random_id_1)
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 5a27a43..c4a3e0e 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -88,8 +88,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
def test_delete_image(self):
- # Deletes an image by image_id
-
+ """Test deleting an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
@@ -110,8 +109,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
def test_update_image(self):
- # Updates an image by image_id
-
+ """Test updating an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
@@ -135,6 +133,7 @@
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
+ """Test deactivating and reactivating an image"""
# Create image
image_name = data_utils.rand_name('image')
image = self.create_image(name=image_name,
@@ -235,7 +234,7 @@
@decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_list_no_params(self):
- # Simple test to see all fixture images returned
+ """Simple test to see all fixture images returned"""
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
@@ -244,25 +243,25 @@
@decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
- # Test to get all images with a specific container_format
+ """Test to get all images with a specific container_format"""
params = {"container_format": self.test_data['container_format']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
- # Test to get all images with disk_format = raw
+ """Test to get all images with disk_format = raw"""
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
- # Test to get all images with visibility = private
+ """Test to get all images with visibility = private"""
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
- # Test to get all images by size
+ """Test to get all images by size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -272,7 +271,7 @@
@decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
- # Test to get all images with size between 2000 to 3000
+ """Test to get all images with min size and max size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -290,13 +289,13 @@
@decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
- # Test to get all active images
+ """Test to get all active images"""
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
- # Test to get images by limit
+ """Test to get images by limit"""
params = {"limit": 1}
images_list = self.client.list_images(params=params)['images']
@@ -305,7 +304,7 @@
@decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
def test_list_image_param_owner(self):
- # Test to get images by owner
+ """Test to get images by owner"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
@@ -315,13 +314,13 @@
@decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
def test_list_images_param_name(self):
- # Test to get images by name
+ """Test to get images by name"""
params = {'name': self.test_data['name']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
def test_list_images_param_tag(self):
- # Test to get images matching a tag
+ """Test to get images matching a tag"""
params = {'tag': self.test_data['tags'][0]}
images_list = self.client.list_images(params=params)['images']
# Validating properties of fetched images
@@ -336,24 +335,26 @@
@decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
def test_list_images_param_sort(self):
+ """Test listing images sorting in descending order"""
params = {'sort': 'size:desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
def test_list_images_param_sort_key_dir(self):
+ """Test listing images sorting by size in descending order"""
params = {'sort_key': 'size', 'sort_dir': 'desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
- # Test to get image schema
+ """Test to get image schema"""
schema = "image"
body = self.schemas_client.show_schema(schema)
self.assertEqual("image", body['name'])
@decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
- # Test to get images schema
+ """Test to get images schema"""
schema = "images"
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
@@ -372,6 +373,7 @@
@decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
def test_list_images_param_member_status(self):
+ """Test listing images by member_status and visibility"""
# Create an image to be shared using default visibility
image_file = six.BytesIO(data_utils.random_bytes(2048))
container_format = CONF.image.container_formats[0]
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index e19d8c8..bc67859 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -15,9 +15,11 @@
class ImagesMemberTest(base.BaseV2MemberImageTest):
+ """Test image members"""
@decorators.idempotent_id('5934c6ea-27dc-4d6e-9421-eeb5e045494a')
def test_image_share_accept(self):
+ """Test sharing and accepting an image"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -41,6 +43,7 @@
@decorators.idempotent_id('d9e83e5f-3524-4b38-a900-22abcb26e90e')
def test_image_share_reject(self):
+ """Test sharing and rejecting an image"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -57,6 +60,7 @@
@decorators.idempotent_id('a6ee18b9-4378-465e-9ad9-9a6de58a3287')
def test_get_image_member(self):
+ """Test getting image members after the image is accepted"""
image_id = self._create_image()
self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -75,6 +79,7 @@
@decorators.idempotent_id('72989bc7-2268-48ed-af22-8821e835c914')
def test_remove_image_member(self):
+ """Test removing image members after the image is accepted"""
image_id = self._create_image()
self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -89,10 +94,12 @@
@decorators.idempotent_id('634dcc3f-f6e2-4409-b8fd-354a0bb25d83')
def test_get_image_member_schema(self):
+ """Test getting image member schema"""
body = self.schemas_client.show_schema("member")
self.assertEqual("member", body['name'])
@decorators.idempotent_id('6ae916ef-1052-4e11-8d36-b3ae14853cbb')
def test_get_image_members_schema(self):
+ """Test getting image members schema"""
body = self.schemas_client.show_schema("members")
self.assertEqual("members", body['name'])
diff --git a/tempest/api/image/v2/test_images_member_negative.py b/tempest/api/image/v2/test_images_member_negative.py
index caa90f9..5f6f1ae 100644
--- a/tempest/api/image/v2/test_images_member_negative.py
+++ b/tempest/api/image/v2/test_images_member_negative.py
@@ -16,10 +16,12 @@
class ImagesMemberNegativeTest(base.BaseV2MemberImageTest):
+ """Negative tests of image members"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('b79efb37-820d-4cf0-b54c-308b00cf842c')
def test_image_share_invalid_status(self):
+ """Test updating image member status to invalid status should fail"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
@@ -32,6 +34,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('27002f74-109e-4a37-acd0-f91cd4597967')
def test_image_share_owner_cannot_accept(self):
+ """Test that image owner can't accept image shared to other member"""
image_id = self._create_image()
member = self.image_member_client.create_image_member(
image_id, member=self.alt_tenant_id)
diff --git a/tempest/api/image/v2/test_images_metadefs_resource_types.py b/tempest/api/image/v2/test_images_metadefs_resource_types.py
index c60b3f7..6867f2d 100644
--- a/tempest/api/image/v2/test_images_metadefs_resource_types.py
+++ b/tempest/api/image/v2/test_images_metadefs_resource_types.py
@@ -22,6 +22,7 @@
@decorators.idempotent_id('6f358a4e-5ef0-11e6-a795-080027d0d606')
def test_basic_meta_def_resource_type_association(self):
+ """Test image resource type associations"""
# Get the available resource types and use one resource_type
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index b4baf05..dc2bb96 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -36,7 +36,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
def test_get_non_existent_image(self):
- # get the non-existent image
+ """Get the non-existent image"""
non_existent_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_image,
non_existent_id)
@@ -44,14 +44,14 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
def test_get_image_null_id(self):
- # get image with image_id = NULL
+ """Get image with image_id = NULL"""
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.show_image, image_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
def test_get_delete_deleted_image(self):
- # get and delete the deleted image
+ """Get and delete the deleted image"""
# create and delete image
image = self.client.create_image(name='test',
container_format='bare',
@@ -70,7 +70,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
def test_delete_non_existing_image(self):
- # delete non-existent image
+ """Delete non-existent image"""
non_existent_image_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
non_existent_image_id)
@@ -78,7 +78,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
def test_delete_image_null_id(self):
- # delete image with image_id=NULL
+ """Delete image with image_id=NULL"""
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
image_id)
@@ -86,7 +86,10 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
def test_register_with_invalid_container_format(self):
- # Negative tests for invalid data supplied to POST /images
+ """Create image with invalid container format
+
+ Negative tests for invalid data supplied to POST /images
+ """
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='wrong',
disk_format='vhd')
@@ -94,6 +97,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
def test_register_with_invalid_disk_format(self):
+ """Create image with invalid disk format"""
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='bare',
disk_format='wrong')
@@ -101,7 +105,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ab980a34-8410-40eb-872b-f264752f46e5')
def test_delete_protected_image(self):
- # Create a protected image
+ """Create a protected image"""
image = self.create_image(protected=True)
self.addCleanup(self.client.update_image, image['id'],
[dict(replace="/protected", value=False)])
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index 601826e..163063c 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -18,9 +18,11 @@
class ImagesTagsTest(base.BaseV2ImageTest):
+ """Test image tags"""
@decorators.idempotent_id('10407036-6059-4f95-a2cd-cbbbee7ed329')
def test_update_delete_tags_for_image(self):
+ """Test adding and deleting image tags"""
image = self.create_image(container_format='bare',
disk_format='raw',
visibility='private')
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 440fa36..2db4a74 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -19,11 +19,12 @@
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
+ """Negative tests of image tags"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8cd30f82-6f9a-4c6e-8034-c1b51fba43d9')
def test_update_tags_for_non_existing_image(self):
- # Update tag with non existing image.
+ """Update image tag with non existing image"""
tag = data_utils.rand_name('tag')
non_exist_image = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.add_image_tag,
@@ -32,7 +33,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('39c023a2-325a-433a-9eea-649bf1414b19')
def test_delete_non_existing_tag(self):
- # Delete non existing tag.
+ """Delete non existing image tag"""
image = self.create_image(container_format='bare',
disk_format='raw',
visibility='private'
diff --git a/tempest/api/image/v2/test_versions.py b/tempest/api/image/v2/test_versions.py
index 84f1068..ef91354 100644
--- a/tempest/api/image/v2/test_versions.py
+++ b/tempest/api/image/v2/test_versions.py
@@ -17,10 +17,12 @@
class VersionsTest(base.BaseV2ImageTest):
+ """Test image versions"""
@decorators.idempotent_id('659ea30a-a17c-4317-832c-0f68ed23c31d')
@decorators.attr(type='smoke')
def test_list_versions(self):
+ """Test listing image versions"""
versions = self.versions_client.list_versions()['versions']
expected_resources = ('id', 'links', 'status')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index 85f6896..2b9719a 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -103,9 +103,10 @@
List tags.
Remove a tag.
- v2.0 of the Neutron API is assumed. The tag-ext extension allows users to
- set tags on the following resources: subnets, ports, routers and
- subnetpools.
+ v2.0 of the Neutron API is assumed. The tag-ext or standard-attr-tag
+ extension allows users to set tags on the following resources: subnets,
+ ports, routers and subnetpools.
+    Since the Stein release, tag-ext has been renamed to standard-attr-tag.
"""
# NOTE(felipemonteiro): The supported resource names are plural. Use
@@ -115,8 +116,12 @@
@classmethod
def skip_checks(cls):
super(TagsExtTest, cls).skip_checks()
- if not utils.is_extension_enabled('tag-ext', 'network'):
- msg = "tag-ext extension not enabled."
+        # Added condition to support backward compatibility since
+ # tag-ext has been renamed to standard-attr-tag
+ if not (utils.is_extension_enabled('tag-ext', 'network') or
+ utils.is_extension_enabled('standard-attr-tag', 'network')):
+ msg = ("neither tag-ext nor standard-attr-tag extensions "
+ "are enabled.")
raise cls.skipException(msg)
@classmethod
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index f695f51..c57766e 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -113,7 +113,8 @@
self._delete_group_snapshot(group_snapshot)
group_snapshots = self.group_snapshots_client.list_group_snapshots()[
'group_snapshots']
- self.assertEmpty(group_snapshots)
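+        # Other tests may create group snapshots concurrently, so only check
+        # that the deleted group snapshot is no longer present in the list.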
+ self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
@decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
def test_create_group_from_group_snapshot(self):
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index 75dca41..ee52354 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -44,12 +44,10 @@
@decorators.idempotent_id('abb9198e-67d0-4b09-859f-4f4a1418f176')
def test_show_default_quota(self):
+ # response body is validated by schema
default_quotas = self.admin_quota_classes_client.show_quota_class_set(
'default')['quota_class_set']
- self.assertIn('id', default_quotas)
self.assertEqual('default', default_quotas.pop('id'))
- for key in QUOTA_KEYS:
- self.assertIn(key, default_quotas)
@decorators.idempotent_id('a7644c63-2669-467a-b00e-452dd5c5397b')
def test_update_default_quota(self):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 9e24176..c1ceeb7 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -92,15 +92,12 @@
'extra_specs': extra_specs,
'os-volume-type-access:is_public': True}
body = self.create_volume_type(**params)
- self.assertIn('name', body)
self.assertEqual(name, body['name'],
"The created volume_type name is not equal "
"to the requested name")
self.assertEqual(description, body['description'],
"The created volume_type_description name is "
"not equal to the requested name")
- self.assertIsNotNone(body['id'],
- "Field volume_type id is empty or not found.")
fetched_volume_type = self.admin_volume_types_client.show_volume_type(
body['id'])['volume_type']
self.assertEqual(name, fetched_volume_type['name'],
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 3e0deef..5bac3d8 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -23,6 +23,7 @@
class VolumesActionsTest(base.BaseVolumeAdminTest):
+ create_default_network = True
def _create_reset_and_force_delete_temp_volume(self, status=None):
# Create volume, reset volume status, and force delete temp volume
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 1bfd075..bcbcf43 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -30,6 +30,9 @@
tempest.test.BaseTestCase):
"""Base test case class for all Cinder API tests."""
+ # Set this to True in subclasses to create a default network. See
+ # https://bugs.launchpad.net/tempest/+bug/1844568
+ create_default_network = False
_api_version = 2
# if api_v2 is not enabled while api_v3 is enabled, the volume v2 classes
# should be transferred to volume v3 classes.
@@ -63,7 +66,9 @@
@classmethod
def setup_credentials(cls):
- cls.set_network_resources()
+ cls.set_network_resources(
+ network=cls.create_default_network,
+ subnet=cls.create_default_network)
super(BaseVolumeTest, cls).setup_credentials()
@classmethod
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index b602032..1e5c9de 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -17,12 +17,14 @@
class VersionsTest(base.BaseVolumeTest):
+ """Test cinder versions"""
_api_version = 3
@decorators.idempotent_id('77838fc4-b49b-4c64-9533-166762517369')
@decorators.attr(type='smoke')
def test_list_versions(self):
+ """Test listing cinder versions"""
# NOTE: The version data is checked on service client side
# with JSON-Schema validation. It is enough to just call
# the API here.
@@ -30,6 +32,7 @@
@decorators.idempotent_id('7f755ae2-caa9-4049-988c-331d8f7a579f')
def test_show_version(self):
+ "Test getting cinder version details"
# NOTE: The version data is checked on service client side
# with JSON-Schema validation. So we will loop through each
# version and call show version.
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index 00a3375..4d64a95 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -23,7 +23,7 @@
# NOTE(zhufl): This inherits from BaseVolumeAdminTest because
# it requires force_tenant_isolation=True, which need admin
# credentials to create non-admin users for the tests.
-class AbsoluteLimitsTests(base.BaseVolumeAdminTest): # noqa
+class AbsoluteLimitsTests(base.BaseVolumeAdminTest): # noqa: T115
# avoid existing volumes of pre-defined tenant
force_tenant_isolation = True
diff --git a/tempest/api/volume/test_volume_delete_cascade.py b/tempest/api/volume/test_volume_delete_cascade.py
index bb32c11..53f1bca 100644
--- a/tempest/api/volume/test_volume_delete_cascade.py
+++ b/tempest/api/volume/test_volume_delete_cascade.py
@@ -58,8 +58,11 @@
@decorators.idempotent_id('994e2d40-de37-46e8-b328-a58fba7e4a95')
def test_volume_delete_cascade(self):
- # The case validates the ability to delete a volume
- # with associated snapshots.
+ """Test deleting a volume with associated snapshots
+
+ The case validates the ability to delete a volume
+ with associated snapshots.
+ """
# Create a volume
volume = self.create_volume()
@@ -78,9 +81,12 @@
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'Skip because of Bug#1677525')
def test_volume_from_snapshot_cascade_delete(self):
- # The case validates the ability to delete a volume with
- # associated snapshot while there is another volume created
- # from that snapshot.
+ """Test deleting a volume with associated volume-associated snapshot
+
+ The case validates the ability to delete a volume with
+ associated snapshot while there is another volume created
+ from that snapshot.
+ """
# Create a volume
volume = self.create_volume()
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index d203b2d..2151168 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -20,6 +20,7 @@
class VolumesMetadataTest(base.BaseVolumeTest):
+ """Test volume metadata"""
@classmethod
def resource_setup(cls):
@@ -34,6 +35,7 @@
@decorators.idempotent_id('6f5b125b-f664-44bf-910f-751591fe5769')
def test_crud_volume_metadata(self):
+ """Test creating, getting, updating and deleting of volume metadata"""
# Create metadata for the volume
metadata = {"key1": "value1",
"key2": "value2",
@@ -71,6 +73,7 @@
@decorators.idempotent_id('862261c5-8df4-475a-8c21-946e50e36a20')
def test_update_show_volume_metadata_item(self):
+ """Test updating and getting single volume metadata item"""
# Update metadata item for the volume
metadata = {"key1": "value1",
"key2": "value2",
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4cdf898..3eb81f5 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -20,6 +20,7 @@
class VolumesTransfersTest(base.BaseVolumeTest):
+ """Test volume transfer"""
credentials = ['primary', 'alt', 'admin']
@@ -34,6 +35,7 @@
@decorators.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
def test_create_get_list_accept_volume_transfer(self):
+ """Test creating, getting, listing and accepting of volume transfer"""
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
@@ -74,6 +76,7 @@
@decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
def test_create_list_delete_volume_transfer(self):
+ """Test creating, listing and deleting volume transfer"""
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index be5638e..9edffc6 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -25,6 +25,7 @@
class VolumesActionsTest(base.BaseVolumeTest):
+ create_default_network = True
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index ea39a21..eb54426 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -23,6 +23,7 @@
class VolumesCloneTest(base.BaseVolumeTest):
+ """Test volume clone"""
@classmethod
def skip_checks(cls):
@@ -44,6 +45,7 @@
@decorators.idempotent_id('9adae371-a257-43a5-9555-dc7c88e66e0e')
def test_create_from_volume(self):
+ """Test cloning a volume with increasing size"""
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size
@@ -58,6 +60,7 @@
@decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
@utils.services('image')
def test_create_from_bootable_volume(self):
+ """Test cloning a bootable volume"""
# Create volume from image
img_uuid = CONF.compute.image_ref
src_vol = self.create_volume(imageRef=img_uuid)
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index bba7a0b..4bfb166 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -22,6 +22,7 @@
class VolumesCloneNegativeTest(base.BaseVolumeTest):
+ """Negative tests of volume clone"""
@classmethod
def skip_checks(cls):
@@ -32,6 +33,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9adae371-a257-43a5-459a-dc7c88e66e0e')
def test_create_from_volume_decreasing_size(self):
+ """Test cloning a volume with decreasing size will fail"""
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size + 1
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index c3f44e2..041823d 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -28,9 +28,11 @@
class VolumesExtendTest(base.BaseVolumeTest):
+ """Test volume extend"""
@decorators.idempotent_id('9a36df71-a257-43a5-9555-dc7c88e66e0e')
def test_volume_extend(self):
+ """Test extend a volume"""
# Extend Volume Test.
volume = self.create_volume(imageRef=self.image_ref)
extend_size = volume['size'] * 2
@@ -45,6 +47,7 @@
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
"Cinder volume snapshots are disabled")
def test_volume_extend_when_volume_has_snapshot(self):
+ """Test extending a volume which has a snapshot"""
volume = self.create_volume()
self.create_snapshot(volume['id'])
@@ -60,6 +63,7 @@
class VolumesExtendAttachedTest(base.BaseVolumeTest):
"""Tests extending the size of an attached volume."""
+ create_default_network = True
# We need admin credentials for getting instance action event details. By
# default a non-admin can list and show instance actions if they own the
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 71db95c..ade2deb 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -27,6 +27,7 @@
class VolumesGetTest(base.BaseVolumeTest):
+ """Test getting volume info"""
def _volume_create_get_update_delete(self, **kwargs):
# Create a volume, Get it's details and Delete the volume
@@ -118,12 +119,14 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51')
def test_volume_create_get_update_delete(self):
+ """Test Create/Get/Update/Delete of a blank volume"""
self._volume_create_get_update_delete(size=CONF.volume.volume_size)
@decorators.attr(type='smoke')
@decorators.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
@utils.services('image')
def test_volume_create_get_update_delete_from_image(self):
+ """Test Create/Get/Update/Delete of a volume created from image"""
image = self.images_client.show_image(CONF.compute.image_ref)
min_disk = image['min_disk']
disk_size = max(min_disk, CONF.volume.volume_size)
@@ -134,12 +137,14 @@
@testtools.skipUnless(CONF.volume_feature_enabled.clone,
'Cinder volume clones are disabled')
def test_volume_create_get_update_delete_as_clone(self):
+ """Test Create/Get/Update/Delete of a cloned volume"""
origin = self.create_volume()
self._volume_create_get_update_delete(source_volid=origin['id'],
size=CONF.volume.volume_size)
class VolumesSummaryTest(base.BaseVolumeTest):
+ """Test volume summary"""
_api_version = 3
min_microversion = '3.12'
@@ -147,6 +152,7 @@
@decorators.idempotent_id('c4f2431e-4920-4736-9e00-4040386b6feb')
def test_show_volume_summary(self):
+ """Test showing volume summary"""
volume_summary = \
self.volumes_client.show_volume_summary()['volume-summary']
for key in ['total_size', 'total_count']:
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 72e7290..bf221e8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -25,6 +25,7 @@
class VolumesSnapshotTestJSON(base.BaseVolumeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/volume/test_volumes_snapshots_list.py b/tempest/api/volume/test_volumes_snapshots_list.py
index 8a416ea..f4f039c 100644
--- a/tempest/api/volume/test_volumes_snapshots_list.py
+++ b/tempest/api/volume/test_volumes_snapshots_list.py
@@ -109,7 +109,7 @@
snap_list = self.snapshots_client.list_snapshots(
sort_key=sort_key, sort_dir=sort_dir)['snapshots']
self.assertNotEmpty(snap_list)
- if sort_key is 'display_name':
+ if sort_key == 'display_name':
sort_key = 'name'
# Note: On Cinder API, 'display_name' works as a sort key
# on a request, a volume name appears as 'name' on the response.
diff --git a/tempest/clients.py b/tempest/clients.py
index 6aed92e..1db93a0 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -263,6 +263,8 @@
self.volume_v3.MessagesClient())
self.volume_versions_client_latest = (
self.volume_v3.VersionsClient())
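+        # Client for the Cinder v3 volume attachments API.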
+ self.attachments_client_latest = (
+ self.volume_v3.AttachmentsClient())
# TODO(gmann): Below alias for service clients have been
# deprecated and will be removed in future. Start using the alias
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 7ea0099..b230615 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -46,7 +46,6 @@
Username ``--os-username`` OS_USERNAME
Password ``--os-password`` OS_PASSWORD
Project ``--os-project-name`` OS_PROJECT_NAME
-Tenant ``--os-tenant-name`` (depr.) OS_TENANT_NAME
Domain ``--os-domain-name`` OS_DOMAIN_NAME
======== ============================ ====================
@@ -75,9 +74,6 @@
* ``--os-project-name <auth-project-name>`` (Optional) Project to request
authorization on. Defaults to env[OS_PROJECT_NAME].
-* ``--os-tenant-name <auth-tenant-name>`` (Optional, deprecated) Tenant to
- request authorization on. Defaults to env[OS_TENANT_NAME].
-
* ``--os-domain-name <auth-domain-name>`` (Optional) Domain the user and
project belong to. Defaults to env[OS_DOMAIN_NAME].
@@ -100,7 +96,7 @@
To see help on specific argument, please do: ``tempest account-generator
[OPTIONS] <accounts_file.yaml> -h``.
"""
-import argparse
+
import os
import traceback
@@ -139,7 +135,7 @@
'dhcp': True}
admin_creds_dict = {'username': opts.os_username,
'password': opts.os_password}
- _project_name = opts.os_project_name or opts.os_tenant_name
+ _project_name = opts.os_project_name
if opts.identity_version == 3:
admin_creds_dict['project_name'] = _project_name
admin_creds_dict['domain_name'] = opts.os_domain_name or 'Default'
@@ -221,10 +217,6 @@
metavar='<auth-project-name>',
default=os.environ.get('OS_PROJECT_NAME'),
help='Defaults to env[OS_PROJECT_NAME].')
- parser.add_argument('--os-tenant-name',
- metavar='<auth-tenant-name>',
- default=os.environ.get('OS_TENANT_NAME'),
- help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os-domain-name',
metavar='<auth-domain-name>',
default=os.environ.get('OS_DOMAIN_NAME'),
@@ -256,21 +248,6 @@
help='Output accounts yaml file')
-def get_options():
- usage_string = ('tempest account-generator [-h] <ARG> ...\n\n'
- 'To see help on specific argument, do:\n'
- 'tempest account-generator <ARG> -h')
- parser = argparse.ArgumentParser(
- description=DESCRIPTION,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- usage=usage_string
- )
-
- _parser_add_args(parser)
- opts = parser.parse_args()
- return opts
-
-
class TempestAccountGenerator(command.Command):
def get_parser(self, prog_name):
@@ -280,7 +257,19 @@
def take_action(self, parsed_args):
try:
- main(parsed_args)
+ if parsed_args.config_file:
+ config.CONF.set_config_path(parsed_args.config_file)
+ setup_logging()
+ resources = []
+ for count in range(parsed_args.concurrency):
+ # Use N different cred_providers to obtain different
+ # sets of creds
+ cred_provider = get_credential_provider(parsed_args)
+ resources.extend(generate_resources(cred_provider,
+ parsed_args.admin))
+ dump_accounts(resources, parsed_args.identity_version,
+ parsed_args.accounts)
+
except Exception:
LOG.exception("Failure generating test accounts.")
traceback.print_exc()
@@ -288,30 +277,3 @@
def get_description(self):
return DESCRIPTION
-
-
-def main(opts=None):
- log_warning = False
- if not opts:
- log_warning = True
- opts = get_options()
- if opts.config_file:
- config.CONF.set_config_path(opts.config_file)
- setup_logging()
- if log_warning:
- LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
- "please use: 'tempest account-generator'")
- if opts.os_tenant_name:
- LOG.warning("'os-tenant-name' and 'OS_TENANT_NAME' are both "
- "deprecated, please use 'os-project-name' or "
- "'OS_PROJECT_NAME' instead")
- resources = []
- for count in range(opts.concurrency):
- # Use N different cred_providers to obtain different sets of creds
- cred_provider = get_credential_provider(opts)
- resources.extend(generate_resources(cred_provider, opts.admin))
- dump_accounts(resources, opts.identity_version, opts.accounts)
-
-
-if __name__ == "__main__":
- main()
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index c54b16b..0b96d9e 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -123,6 +123,16 @@
raise Exception(self.GOT_EXCEPTIONS)
def init(self, parsed_args):
+        # Set a new handler for logging to stdout; by default only INFO
+        # messages are logged to stdout.
+ stdout_handler = logging.logging.StreamHandler()
+ # debug argument is defined in cliff already
+ if self.app_args.debug:
+ stdout_handler.level = logging.DEBUG
+ else:
+ stdout_handler.level = logging.INFO
+ LOG.handlers.append(stdout_handler)
+
cleanup_service.init_conf()
self.options = parsed_args
self.admin_mgr = clients.Manager(
@@ -149,7 +159,7 @@
self._load_json()
def _cleanup(self):
- print("Begin cleanup")
+ LOG.info("Begin cleanup")
is_dry_run = self.options.dry_run
is_preserve = not self.options.delete_tempest_conf_objects
is_save_state = False
@@ -167,7 +177,7 @@
'is_save_state': is_save_state}
project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
projects = project_service.list()
- print("Process %s projects" % len(projects))
+ LOG.info("Processing %s projects", len(projects))
# Loop through list of projects and clean them up.
for project in projects:
@@ -179,10 +189,12 @@
'is_preserve': is_preserve,
'is_save_state': is_save_state,
'got_exceptions': self.GOT_EXCEPTIONS}
+ LOG.info("Processing global services")
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
+ LOG.info("Processing services")
for service in self.resource_cleanup_services:
svc = service(self.admin_mgr, **kwargs)
svc.run()
@@ -193,7 +205,7 @@
indent=2, separators=(',', ': ')))
def _clean_project(self, project):
- print("Cleaning project: %s " % project['name'])
+ LOG.debug("Cleaning project: %s ", project['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
is_preserve = not self.options.delete_tempest_conf_objects
@@ -263,7 +275,7 @@
return 'Cleanup after tempest run'
def _init_state(self):
- print("Initializing saved state.")
+ LOG.info("Initializing saved state.")
data = {}
admin_mgr = self.admin_mgr
kwargs = {'data': data,
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 2b35ebf..84d2492 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -13,6 +13,7 @@
# under the License.
from oslo_log import log as logging
+from six.moves.urllib import parse as urllib
from tempest import clients
from tempest.common import credentials_factory as credentials
@@ -22,7 +23,7 @@
from tempest import config
from tempest.lib import exceptions
-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger('tempest.cmd.cleanup')
CONF = config.CONF
CONF_FLAVORS = None
@@ -166,6 +167,7 @@
client = self.client
for snap in snaps:
try:
+ LOG.debug("Deleting Snapshot with id %s", snap['id'])
client.delete_snapshot(snap['id'])
except Exception:
LOG.exception("Delete Snapshot %s exception.", snap['id'])
@@ -203,6 +205,7 @@
servers = self.list()
for server in servers:
try:
+ LOG.debug("Deleting Server with id %s", server['id'])
client.delete_server(server['id'])
except Exception:
LOG.exception("Delete Server %s exception.", server['id'])
@@ -235,6 +238,7 @@
sgs = self.list()
for sg in sgs:
try:
+ LOG.debug("Deleting Server Group with id %s", sg['id'])
client.delete_server_group(sg['id'])
except Exception:
LOG.exception("Delete Server Group %s exception.", sg['id'])
@@ -272,6 +276,7 @@
for k in keypairs:
name = k['keypair']['name']
try:
+ LOG.debug("Deleting keypair %s", name)
client.delete_keypair(name)
except Exception:
LOG.exception("Delete Keypair %s exception.", name)
@@ -308,6 +313,7 @@
vols = self.list()
for v in vols:
try:
+ LOG.debug("Deleting volume with id %s", v['id'])
client.delete_volume(v['id'])
except Exception:
LOG.exception("Delete Volume %s exception.", v['id'])
@@ -331,6 +337,8 @@
def delete(self):
client = self.client
try:
+ LOG.debug("Deleting Volume Quotas for project with id %s",
+ self.project_id)
client.delete_quota_set(self.project_id)
except Exception:
LOG.exception("Delete Volume Quotas exception for 'project %s'.",
@@ -351,9 +359,11 @@
def delete(self):
client = self.client
try:
+ LOG.debug("Deleting Nova Quotas for project with id %s",
+ self.project_id)
client.delete_quota_set(self.project_id)
except Exception:
- LOG.exception("Delete Quotas exception for 'project %s'.",
+ LOG.exception("Delete Nova Quotas exception for 'project %s'.",
self.project_id)
def dry_run(self):
@@ -362,6 +372,27 @@
self.data['compute_quotas'] = quotas['absolute']
+class NetworkQuotaService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(NetworkQuotaService, self).__init__(kwargs)
+ self.client = manager.network_quotas_client
+
+ def delete(self):
+ client = self.client
+ try:
+ LOG.debug("Deleting Network Quotas for project with id %s",
+ self.project_id)
+ client.reset_quotas(self.project_id)
+ except Exception:
+ LOG.exception("Delete Network Quotas exception for 'project %s'.",
+ self.project_id)
+
+ def dry_run(self):
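+        # list_quotas returns quota sets for every project, so keep only
+        # the entry for the project currently being cleaned up.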
+ resp = [quota for quota in self.client.list_quotas()['quotas']
+ if quota['project_id'] == self.project_id]
+ self.data['network_quotas'] = resp
+
+
# Begin network service classes
class BaseNetworkService(BaseService):
def __init__(self, manager, **kwargs):
@@ -399,7 +430,7 @@
if self.is_preserve:
networks = [network for network in networks
if network['id'] not in CONF_NETWORKS]
- LOG.debug("List count, %s Networks", networks)
+ LOG.debug("List count, %s Networks", len(networks))
return networks
def delete(self):
@@ -407,6 +438,7 @@
networks = self.list()
for n in networks:
try:
+ LOG.debug("Deleting Network with id %s", n['id'])
client.delete_network(n['id'])
except Exception:
LOG.exception("Delete Network %s exception.", n['id'])
@@ -441,6 +473,8 @@
flips = self.list()
for flip in flips:
try:
+ LOG.debug("Deleting Network Floating IP with id %s",
+ flip['id'])
client.delete_floatingip(flip['id'])
except Exception:
LOG.exception("Delete Network Floating IP %s exception.",
@@ -486,11 +520,14 @@
if net_info.is_router_interface_port(port)]
for port in ports:
try:
+ LOG.debug("Deleting port with id %s of router with id %s",
+ port['id'], rid)
client.remove_router_interface(rid, port_id=port['id'])
except Exception:
LOG.exception("Delete Router Interface exception for "
"'port %s' of 'router %s'.", port['id'], rid)
try:
+ LOG.debug("Deleting Router with id %s", rid)
client.delete_router(rid)
except Exception:
LOG.exception("Delete Router %s exception.", rid)
@@ -526,6 +563,8 @@
rules = self.list()
for rule in rules:
try:
+ LOG.debug("Deleting Metering Label Rule with id %s",
+ rule['id'])
client.delete_metering_label_rule(rule['id'])
except Exception:
LOG.exception("Delete Metering Label Rule %s exception.",
@@ -562,6 +601,7 @@
labels = self.list()
for label in labels:
try:
+ LOG.debug("Deleting Metering Label with id %s", label['id'])
client.delete_metering_label(label['id'])
except Exception:
LOG.exception("Delete Metering Label %s exception.",
@@ -602,6 +642,7 @@
ports = self.list()
for port in ports:
try:
+ LOG.debug("Deleting port with id %s", port['id'])
client.delete_port(port['id'])
except Exception:
LOG.exception("Delete Port %s exception.", port['id'])
@@ -643,6 +684,7 @@
secgroups = self.list()
for secgroup in secgroups:
try:
+ LOG.debug("Deleting security_group with id %s", secgroup['id'])
client.delete_security_group(secgroup['id'])
except Exception:
LOG.exception("Delete security_group %s exception.",
@@ -679,6 +721,7 @@
subnets = self.list()
for subnet in subnets:
try:
+ LOG.debug("Deleting subnet with id %s", subnet['id'])
client.delete_subnet(subnet['id'])
except Exception:
LOG.exception("Delete Subnet %s exception.", subnet['id'])
@@ -714,6 +757,7 @@
pools = self.list()
for pool in pools:
try:
+ LOG.debug("Deleting Subnet Pool with id %s", pool['id'])
client.delete_subnetpool(pool['id'])
except Exception:
LOG.exception("Delete Subnet Pool %s exception.", pool['id'])
@@ -742,8 +786,10 @@
if not self.is_save_state:
regions = [region for region in regions['regions'] if region['id']
not in self.saved_state_json['regions'].keys()]
+ LOG.debug("List count, %s Regions", len(regions))
return regions
else:
+ LOG.debug("List count, %s Regions", len(regions['regions']))
return regions['regions']
def delete(self):
@@ -751,6 +797,7 @@
regions = self.list()
for region in regions:
try:
+ LOG.debug("Deleting region with id %s", region['id'])
client.delete_region(region['id'])
except Exception:
LOG.exception("Delete Region %s exception.", region['id'])
@@ -792,6 +839,7 @@
flavors = self.list()
for flavor in flavors:
try:
+ LOG.debug("Deleting flavor with id %s", flavor['id'])
client.delete_flavor(flavor['id'])
except Exception:
LOG.exception("Delete Flavor %s exception.", flavor['id'])
@@ -814,7 +862,15 @@
def list(self):
client = self.client
- images = client.list_images(params={"all_tenants": True})['images']
+ response = client.list_images()
+ images = []
+ images.extend(response['images'])
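+        # Glance paginates image listings; follow each 'next' link by
+        # passing its marker back to list_images until no link remains.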
+ while 'next' in response:
+ parsed = urllib.urlparse(response['next'])
+ marker = urllib.parse_qs(parsed.query)['marker'][0]
+ response = client.list_images(params={"marker": marker})
+ images.extend(response['images'])
+
if not self.is_save_state:
images = [image for image in images if image['id']
not in self.saved_state_json['images'].keys()]
@@ -829,6 +885,7 @@
images = self.list()
for image in images:
try:
+ LOG.debug("Deleting image with id %s", image['id'])
client.delete_image(image['id'])
except Exception:
LOG.exception("Delete Image %s exception.", image['id'])
@@ -872,6 +929,7 @@
users = self.list()
for user in users:
try:
+ LOG.debug("Deleting user with id %s", user['id'])
self.client.delete_user(user['id'])
except Exception:
LOG.exception("Delete User %s exception.", user['id'])
@@ -912,6 +970,7 @@
roles = self.list()
for role in roles:
try:
+ LOG.debug("Deleting role with id %s", role['id'])
self.client.delete_role(role['id'])
except Exception:
LOG.exception("Delete Role %s exception.", role['id'])
@@ -954,6 +1013,7 @@
projects = self.list()
for project in projects:
try:
+ LOG.debug("Deleting project with id %s", project['id'])
self.client.delete_project(project['id'])
except Exception:
LOG.exception("Delete project %s exception.", project['id'])
@@ -990,6 +1050,7 @@
domains = self.list()
for domain in domains:
try:
+ LOG.debug("Deleting domain with id %s", domain['id'])
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
except Exception:
@@ -1020,6 +1081,8 @@
project_associated_services.append(NovaQuotaService)
if IS_CINDER:
project_associated_services.append(VolumeQuotaService)
+ if IS_NEUTRON:
+ project_associated_services.append(NetworkQuotaService)
return project_associated_services
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index f9ca2c7..d82b6df 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -47,6 +47,42 @@
by removing unnecessary tests from a list file which is generated from
``--list-tests`` option.
+You can also use the ``--worker-file`` option, which lets you pass the path
+to a worker YAML file, allowing you to manually schedule the test run.
+For example, you can set up a tempest run with
+different concurrencies to be used with different regexps.
+An example of a worker file is shown below::
+
+ # YAML Worker file
+ - worker:
+ # you can have more than one regex per worker
+ - tempest.api.*
+ - neutron_tempest_tests
+ - worker:
+ - tempest.scenario.*
+
+This will run tests matching 'tempest.api.*' and 'neutron_tempest_tests'
+on worker 1, and tests matching 'tempest.scenario.*' on worker 2.
+
+You can mix manual scheduling with the standard scheduling mechanisms by
+setting the concurrency field on a worker. For example::
+
+ # YAML Worker file
+ - worker:
+ # you can have more than one regex per worker
+ - tempest.api.*
+ - neutron_tempest_tests
+ concurrency: 3
+ - worker:
+ - tempest.scenario.*
+ concurrency: 2
+
+This will run tests matching 'tempest.api.*' and 'neutron_tempest_tests' with
+3 workers, and tests matching 'tempest.scenario.*' with 2 workers.
+
+This worker file is passed into stestr. For more details on how it
+operates, please refer to the stestr scheduling docs:
+https://stestr.readthedocs.io/en/stable/MANUAL.html#test-scheduling
+
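+For example, assuming the worker file above is saved as ``worker-file.yaml``,
+it could be passed to a run with::
+
+  tempest run --worker-file worker-file.yaml
+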
Test Execution
==============
There are several options to control how the tests are executed. By default
@@ -185,6 +221,7 @@
blacklist_file=parsed_args.blacklist_file,
whitelist_file=parsed_args.whitelist_file,
black_regex=parsed_args.black_regex,
+ worker_path=parsed_args.worker_file,
load_list=parsed_args.load_list, combine=parsed_args.combine)
if return_code > 0:
sys.exit(return_code)
@@ -254,6 +291,10 @@
'on each newline. This command '
'supports files created by the tempest '
'run ``--list-tests`` command')
+ parser.add_argument('--worker-file', '--worker_file',
+ help='Optional path to a worker file. This file '
+                                 'contains each worker configuration used '
+                                 'to schedule the test run')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index d25d3ca..8d5bdbd 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -433,11 +433,6 @@
def main(opts=None):
- print('Running config verification...')
- if opts is None:
- print("Use of: 'verify-tempest-config' is deprecated, "
- "please use: 'tempest verify-config'")
- opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
@@ -497,7 +492,3 @@
LOG.exception("Failure verifying configuration.")
traceback.print_exc()
raise
-
-
-if __name__ == "__main__":
- main()
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index cd85ede..edb9d16 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -400,9 +400,24 @@
"""Upgrade the HTTP connection to a WebSocket and verify."""
# It is possible to pass the path as a query parameter in the request,
# so use it if present
+ # Given noVNC format
+ # https://x.com/vnc_auto.html?path=%3Ftoken%3Dxxx,
+ # url format is
+ # ParseResult(scheme='https', netloc='x.com',
+ # path='/vnc_auto.html', params='',
+ # query='path=%3Ftoken%3Dxxx', fragment='').
+ # qparams format is {'path': ['?token=xxx']}
qparams = urlparse.parse_qs(url.query)
- path = qparams['path'][0] if 'path' in qparams else '/websockify'
- reqdata = 'GET %s HTTP/1.1\r\n' % path
+ # according to references
+ # https://docs.python.org/3/library/urllib.parse.html
+ # https://tools.ietf.org/html/rfc3986#section-3.4
+ # qparams['path'][0] format is '?token=xxx' without / prefix
+        # remove the / in /websockify to comply with the references above.
+ path = qparams['path'][0] if 'path' in qparams else 'websockify'
+ # Fix websocket request format by adding / prefix.
+ # Updated request format: GET /?token=xxx HTTP/1.1
+ # or GET /websockify HTTP/1.1
+ reqdata = 'GET /%s HTTP/1.1\r\n' % path
reqdata += 'Host: %s' % url.hostname
# Add port only if we have one specified
if url.port:
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 11f3bf9..14790d6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -124,6 +124,12 @@
raise lib_exc.DeleteErrorException(
"Server %s failed to delete and is in ERROR status" %
server_id)
+ if server_status == 'SOFT_DELETED':
+ # Soft-deleted instances need to be forcibly deleted to
+ # prevent some test cases from failing.
+ LOG.debug("Automatically force-deleting soft-deleted server %s",
+ server_id)
+ client.force_delete_server(server_id)
if int(time.time()) - start_time >= client.build_timeout:
raise lib_exc.TimeoutException
@@ -217,6 +223,22 @@
resource_name, resource_id, status, time.time() - start)
+def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
+ """Waits for a volume attachment to be removed from a given volume."""
+ start = int(time.time())
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ while any(attachment_id == a['attachment_id'] for a in attachments):
+ time.sleep(client.build_interval)
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Failed to remove attachment %s from volume %s '
+ 'within the required time (%s s).' %
+ (attachment_id, volume_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ LOG.info('Attachment %s removed from volume %s after waiting for %f '
+ 'seconds', attachment_id, volume_id, time.time() - start)
+
+
def wait_for_volume_migration(client, volume_id, new_host):
"""Waits for a Volume to move to a new host."""
body = client.show_volume(volume_id)['volume']
diff --git a/tempest/config.py b/tempest/config.py
index 4e08a11..204d977 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -475,7 +475,14 @@
default=False,
help="Does the test environment support block migration with "
"Cinder iSCSI volumes. Note: libvirt >= 1.2.17 is required "
- "to support this if using the libvirt compute driver."),
+ "to support this if using the libvirt compute driver.",
+ deprecated_for_removal=True,
+ deprecated_reason='This option duplicates the more generic '
+                                  '[compute-feature-enabled]/block_migration'
+ '_for_live_migration now that '
+ 'MIN_LIBVIRT_VERSION is >= 1.2.17 on all '
+ 'branches from stable/rocky and will be '
+ 'removed in a future release.'),
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
@@ -492,11 +499,19 @@
cfg.BoolOpt('spice_console',
default=False,
help='Enable Spice console. This configuration value should '
- 'be same as nova.conf: spice.enabled'),
+ 'be same as nova.conf: spice.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
- 'be same as nova.conf: rdp.enabled'),
+ 'be same as nova.conf: rdp.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('serial_console',
default=False,
help='Enable serial console. This configuration value '
@@ -506,6 +521,10 @@
default=True,
help='Does the test environment support instance rescue '
'mode?'),
+ cfg.BoolOpt('stable_rescue',
+ default=False,
+ help='Does the test environment support stable device '
+ 'instance rescue mode?'),
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the '
@@ -684,6 +703,11 @@
cfg.StrOpt('floating_network_name',
help="Default floating network name. Used to allocate floating "
"IPs when neutron is enabled."),
+ cfg.StrOpt('subnet_id',
+ default="",
+               help="ID of the subnet which is used for allocation of "
+                    "floating IPs. Specify this when two or more subnets "
+                    "are present in the network."),
cfg.StrOpt('public_router_id',
default="",
help="Id of the public router that provides external "
@@ -809,7 +833,8 @@
help="User name used to authenticate to an instance."),
cfg.StrOpt('image_ssh_password',
default="password",
- help="Password used to authenticate to an instance."),
+ help="Password used to authenticate to an instance.",
+ secret=True),
cfg.StrOpt('ssh_shell_prologue',
default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
help="Shell fragments to use before executing a command "
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 2c40cb1..6a97a00 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -15,6 +15,7 @@
import os
import re
+from hacking import core
import pycodestyle
@@ -25,7 +26,6 @@
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
-VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
@@ -39,6 +39,7 @@
_HAVE_NEGATIVE_DECORATOR = False
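+# NOTE: each check below is registered with flake8 through hacking's
+# ``core.flake8ext`` decorator rather than the legacy factory() registration.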
+@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
@@ -53,6 +54,7 @@
" in tempest/api/* or tempest/scenario/* tests"))
+@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
@@ -67,6 +69,7 @@
"T104: Scenario tests require a service decorator")
+@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
@@ -80,20 +83,7 @@
"T105: (setUp|tearDown)Class can not be used in tests")
-def no_vi_headers(physical_line, line_number, lines):
- """Check for vi editor configuration in source files.
-
- By default vi modelines can only appear in the first or
- last 5 lines of a source file.
-
- T106
- """
- # NOTE(gilliard): line_number is 1-indexed
- if line_number <= 5 or line_number > len(lines) - 5:
- if VI_HEADER_RE.match(physical_line):
- return 0, "T106: Don't put vi configuration in source files"
-
-
+@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
@@ -117,6 +107,7 @@
"T107: service tag should not be in path")
+@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
@@ -127,6 +118,7 @@
return 0, msg
+@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
@@ -137,6 +129,7 @@
yield (0, msg)
+@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
@@ -170,7 +163,8 @@
return True
-def get_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
@@ -197,7 +191,8 @@
yield (0, msg)
-def delete_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
@@ -223,6 +218,7 @@
yield (0, msg)
+@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
@@ -244,6 +240,7 @@
yield (0, msg)
+@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
@@ -260,6 +257,7 @@
yield (0, msg)
+@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
@@ -277,7 +275,8 @@
yield(0, msg)
-def dont_put_admin_tests_on_nonadmin_path(logical_line, physical_line,
+@core.flake8ext
+def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
@@ -287,9 +286,6 @@
if 'tempest/api/' not in filename:
return
- if pycodestyle.noqa(physical_line):
- return
-
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
@@ -298,6 +294,7 @@
yield(0, msg)
+@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
@@ -309,6 +306,7 @@
yield(0, msg)
+@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
@@ -330,22 +328,3 @@
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
-
-
-def factory(register):
- register(import_no_clients_in_api_and_scenario_tests)
- register(scenario_tests_need_service_tags)
- register(no_setup_teardown_class_for_tests)
- register(no_vi_headers)
- register(service_tags_not_in_module_path)
- register(no_hyphen_at_end_of_rand_name)
- register(no_mutable_default_args)
- register(no_testtools_skip_decorator)
- register(get_resources_on_service_clients)
- register(delete_resources_on_service_clients)
- register(dont_import_local_tempest_into_lib)
- register(dont_use_config_in_tempest_lib)
- register(use_rand_uuid_instead_of_uuid4)
- register(dont_put_admin_tests_on_nonadmin_path)
- register(unsupported_exception_attribute_PY3)
- register(negative_test_attribute_always_applied_to_negative_tests)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/__init__.py b/tempest/lib/api_schema/response/compute/v2_73/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_73/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
new file mode 100644
index 0000000..6e491e9
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_71 import servers as servers271
+
+
+###########################################################################
+#
+# 2.73:
+#
+# The locked_reason parameter is now returned in the response body of the
+# following calls:
+#
+# - POST /servers/{server_id}/action (where the action is rebuild)
+# - PUT /servers/{server_id} (update)
+# - GET /servers/{server_id} (show)
+# - GET /servers/detail (list)
+#
+###########################################################################
+
+# The "locked_reason" parameter will either be a string or None.
+locked_reason = {'type': ['string', 'null']}
+
+rebuild_server = copy.deepcopy(servers271.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers271.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+update_server = copy.deepcopy(servers271.update_server)
+update_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+update_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+get_server = copy.deepcopy(servers271.get_server)
+get_server['response_body']['properties']['server'][
+ 'properties'].update({'locked_reason': locked_reason})
+get_server['response_body']['properties']['server'][
+ 'required'].append('locked_reason')
+
+list_servers_detail = copy.deepcopy(servers271.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({'locked_reason': locked_reason})
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('locked_reason')
+
+# NOTE(lajoskatona): Below are the unchanged schemas in this microversion. We
+# need to keep these schemas in this file to have the generic way to select
+# the right schema based on self.schema_versions_info mapping in the service
+# client.
+# ****** Schemas unchanged since microversion 2.71 ***
+list_servers = copy.deepcopy(servers271.list_servers)
+show_server_diagnostics = copy.deepcopy(servers271.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers271.get_remote_consoles)
+list_tags = copy.deepcopy(servers271.list_tags)
+update_all_tags = copy.deepcopy(servers271.update_all_tags)
+delete_all_tags = copy.deepcopy(servers271.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
+update_tag = copy.deepcopy(servers271.update_tag)
+delete_tag = copy.deepcopy(servers271.delete_tag)
diff --git a/tempest/lib/api_schema/response/volume/quota_classes.py b/tempest/lib/api_schema/response/volume/quota_classes.py
new file mode 100644
index 0000000..1a575d2
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/quota_classes.py
@@ -0,0 +1,68 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+show_quota_classes = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_class_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes'],
+ }
+ },
+ 'required': ['quota_class_set']
+ }
+}
+
+update_quota_classes = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_class_set': {
+ 'type': 'object',
+ 'properties': {
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes'],
+ }
+ },
+ 'required': ['quota_class_set']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/volume_types.py b/tempest/lib/api_schema/response/volume/volume_types.py
new file mode 100644
index 0000000..51b3a72
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/volume_types.py
@@ -0,0 +1,176 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+extra_specs_info = {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+}
+
+common_show_volume_type = {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'os-volume-type-access:is_public': {'type': 'boolean'},
+ 'qos_specs_id': {'type': ['string', 'null'], 'format': 'uuid'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'description', 'id',
+ 'os-volume-type-access:is_public']
+}
+
+show_volume_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': common_show_volume_type,
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+delete_volume_type = {'status_code': [202]}
+
+create_volume_type = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'os-volume-type-access:is_public': {'type': 'boolean'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'id',
+ 'description', 'os-volume-type-access:is_public']
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+list_volume_types = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_types': {
+ 'type': 'array',
+ 'items': common_show_volume_type
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_types']
+ }
+}
+
+list_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info
+ },
+ 'additionalProperties': False,
+ 'required': ['extra_specs']
+ }
+}
+
+show_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': extra_specs_info
+}
+
+create_volume_types_extra_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info
+ },
+ 'additionalProperties': False,
+ 'required': ['extra_specs']
+ }
+}
+
+delete_volume_types_extra_specs = {'status_code': [202]}
+
+update_volume_types = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': extra_specs_info,
+ 'name': {'type': 'string'},
+ 'is_public': {'type': 'boolean'},
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'}
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'is_public', 'description', 'id']
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type']
+ }
+}
+
+update_volume_type_extra_specs = {
+ 'status_code': [200],
+ 'response_body': extra_specs_info
+}
+
+add_type_access = {'status_code': [202]}
+
+remove_type_access = {'status_code': [202]}
+
+list_type_access = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type_access': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_type_id': {'type': 'string', 'format': 'uuid'},
+ 'project_id': {'type': 'string', 'format': 'uuid'},
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type_id', 'project_id']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_type_access']
+ }
+}
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 8e6d3d5..3fee489 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -684,7 +684,7 @@
def __str__(self):
"""Represent only attributes included in self.ATTRIBUTES"""
- attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+ attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
_repr = dict((k, getattr(self, k)) for k in attrs)
return str(_repr)
@@ -741,7 +741,7 @@
def __str__(self):
"""Represent only attributes included in self.ATTRIBUTES"""
- attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+ attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
_repr = dict((k, getattr(self, k)) for k in attrs)
return str(_repr)
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index d29362d..80dbc1d 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -32,6 +32,10 @@
# (min_microversion, max_microversion) on each test class if necessary.
min_microversion = None
max_microversion = LATEST_MICROVERSION
+ volume_min_microversion = None
+ volume_max_microversion = LATEST_MICROVERSION
+ placement_min_microversion = None
+ placement_max_microversion = LATEST_MICROVERSION
def check_skip_with_microversion(test_min_version, test_max_version,
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index 2ac1605..3a05f27 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -75,6 +75,11 @@
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
self.proxy_client = proxy_client
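+        # A proxy client that targets the same host, port and username as
+        # this client would tunnel through itself, so reject that
+        # configuration up front instead of looping on connection attempts.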
+ if (self.proxy_client and self.proxy_client.host == self.host and
+ self.proxy_client.port == self.port and
+ self.proxy_client.username == self.username):
+ raise exceptions.SSHClientProxyClientLoop(
+ host=self.host, port=self.port, username=self.username)
self._proxy_conn = None
def _get_ssh_connection(self, sleep=1.5, backoff=1):
@@ -114,8 +119,10 @@
ssh.close()
if self._is_timed_out(_start_time):
LOG.exception("Failed to establish authenticated ssh"
- " connection to %s@%s after %d attempts",
- self.username, self.host, attempts)
+ " connection to %s@%s after %d attempts. "
+ "Proxy client: %s",
+ self.username, self.host, attempts,
+ self._get_proxy_client_info())
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
@@ -219,3 +226,13 @@
cmd = 'nc %s %s' % (self.host, self.port)
chan.exec_command(cmd)
return chan
+
+ def _get_proxy_client_info(self):
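+        # Recursively describe the chain of proxy (jump host) clients so
+        # that SSH timeout logs show every hop used for the connection.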
+ if not self.proxy_client:
+ return 'no proxy client'
+ nested_pclient = self.proxy_client._get_proxy_client_info()
+ return ('%(username)s@%(host)s:%(port)s, nested proxy client: '
+ '%(nested_pclient)s' % {'username': self.proxy_client.username,
+ 'host': self.proxy_client.host,
+ 'port': self.proxy_client.port,
+ 'nested_pclient': nested_pclient})
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index b25b4b2..84b7ee6 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -251,6 +251,11 @@
"stdout:\n%(stdout)s")
+class SSHClientProxyClientLoop(TempestException):
+    message = ("The proxy client has the same host: %(host)s, port: "
+               "%(port)s and username: %(username)s as the parent SSH client")
+
+
class UnknownServiceClient(TempestException):
message = "Service clients named %(services)s are not known"
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index a687137..cbf7a8c 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -35,6 +35,7 @@
from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
+from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
@@ -59,7 +60,8 @@
{'min': '2.57', 'max': '2.62', 'schema': schemav257},
{'min': '2.63', 'max': '2.69', 'schema': schemav263},
{'min': '2.70', 'max': '2.70', 'schema': schemav270},
- {'min': '2.71', 'max': None, 'schema': schemav271}]
+ {'min': '2.71', 'max': '2.72', 'schema': schemav271},
+ {'min': '2.73', 'max': None, 'schema': schemav273}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
diff --git a/tempest/lib/services/image/v2/namespace_tags_client.py b/tempest/lib/services/image/v2/namespace_tags_client.py
index cf63e50..4315f16 100644
--- a/tempest/lib/services/image/v2/namespace_tags_client.py
+++ b/tempest/lib/services/image/v2/namespace_tags_client.py
@@ -116,10 +116,6 @@
url = 'metadefs/namespaces/%s/tags' % namespace
resp, _ = self.delete(url)
- # NOTE(rosmaita): Bug 1656183 fixed the success response code for
- # this call to make it consistent with the other metadefs delete
- # calls. Accept both codes in case tempest is being run against
- # an old Glance.
- self.expected_success([200, 204], resp.status)
+ self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/volume/v3/__init__.py b/tempest/lib/services/volume/v3/__init__.py
index a1b7de3..e2fa836 100644
--- a/tempest/lib/services/volume/v3/__init__.py
+++ b/tempest/lib/services/volume/v3/__init__.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
+from tempest.lib.services.volume.v3.attachments_client import AttachmentsClient
from tempest.lib.services.volume.v3.availability_zone_client \
import AvailabilityZoneClient
from tempest.lib.services.volume.v3.backups_client import BackupsClient
@@ -43,12 +44,11 @@
from tempest.lib.services.volume.v3.volume_manage_client import \
VolumeManageClient
from tempest.lib.services.volume.v3.volumes_client import VolumesClient
-
-__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'BaseClient',
- 'CapabilitiesClient', 'EncryptionTypesClient', 'ExtensionsClient',
- 'GroupSnapshotsClient', 'GroupTypesClient', 'GroupsClient',
- 'HostsClient', 'LimitsClient', 'MessagesClient', 'QosSpecsClient',
- 'QuotaClassesClient', 'QuotasClient', 'SchedulerStatsClient',
- 'ServicesClient', 'SnapshotManageClient', 'SnapshotsClient',
- 'TransfersClient', 'TypesClient', 'VersionsClient',
- 'VolumeManageClient', 'VolumesClient']
+__all__ = ['AttachmentsClient', 'AvailabilityZoneClient', 'BackupsClient',
+ 'BaseClient', 'CapabilitiesClient', 'EncryptionTypesClient',
+ 'ExtensionsClient', 'GroupSnapshotsClient', 'GroupTypesClient',
+ 'GroupsClient', 'HostsClient', 'LimitsClient', 'MessagesClient',
+ 'QosSpecsClient', 'QuotaClassesClient', 'QuotasClient',
+ 'SchedulerStatsClient', 'ServicesClient', 'SnapshotManageClient',
+ 'SnapshotsClient', 'TransfersClient', 'TypesClient',
+ 'VersionsClient', 'VolumeManageClient', 'VolumesClient']
diff --git a/tempest/lib/services/volume/v3/attachments_client.py b/tempest/lib/services/volume/v3/attachments_client.py
new file mode 100644
index 0000000..5e448f7
--- /dev/null
+++ b/tempest/lib/services/volume/v3/attachments_client.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
+
+
+class AttachmentsClient(base_client.BaseClient):
+ """Client class to send CRUD attachment V3 API requests"""
+
+ def show_attachment(self, attachment_id):
+ """Show volume attachment."""
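+        # Corresponds to GET /v3/{project_id}/attachments/{attachment_id}
+        # in the Block Storage (cinder) v3 API.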
+ url = "attachments/%s" % (attachment_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/quota_classes_client.py b/tempest/lib/services/volume/v3/quota_classes_client.py
index cf03918..ff62f0c 100644
--- a/tempest/lib/services/volume/v3/quota_classes_client.py
+++ b/tempest/lib/services/volume/v3/quota_classes_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import quota_classes as schema
from tempest.lib.common import rest_client
@@ -30,8 +31,8 @@
"""
url = 'os-quota-class-sets/%s' % quota_class_id
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_quota_classes, resp, body)
return rest_client.ResponseBody(resp, body)
def update_quota_class_set(self, quota_class_id, **kwargs):
@@ -44,6 +45,6 @@
url = 'os-quota-class-sets/%s' % quota_class_id
put_body = json.dumps({'quota_class_set': kwargs})
resp, body = self.put(url, put_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.update_quota_classes, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 705d319..7fa24a4 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import volume_types as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -48,7 +49,7 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_volume_types, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_type(self, volume_type_id):
@@ -61,7 +62,7 @@
url = "types/%s" % volume_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def create_volume_type(self, **kwargs):
@@ -74,7 +75,7 @@
post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.create_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_type(self, volume_type_id):
@@ -85,7 +86,7 @@
https://docs.openstack.org/api-ref/block-storage/v3/index.html#delete-a-volume-type
"""
resp, body = self.delete("types/%s" % volume_type_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_types_extra_specs(self, volume_type_id, **params):
@@ -101,7 +102,8 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.list_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
@@ -109,7 +111,8 @@
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.show_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
@@ -122,14 +125,16 @@
post_body = json.dumps({'extra_specs': extra_specs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.create_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
"""Deletes the specified volume type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
volume_type_id, extra_spec_name))
- self.expected_success(202, resp.status)
+ self.validate_response(
+ schema.delete_volume_types_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def update_volume_type(self, volume_type_id, **kwargs):
@@ -142,7 +147,7 @@
put_body = json.dumps({'volume_type': kwargs})
resp, body = self.put('types/%s' % volume_type_id, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.update_volume_types, resp, body)
return rest_client.ResponseBody(resp, body)
def update_volume_type_extra_specs(self, volume_type_id, extra_spec_name,
@@ -162,7 +167,8 @@
put_body = json.dumps(extra_specs)
resp, body = self.put(url, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(
+ schema.update_volume_type_extra_specs, resp, body)
return rest_client.ResponseBody(resp, body)
def add_type_access(self, volume_type_id, **kwargs):
@@ -175,7 +181,7 @@
post_body = json.dumps({'addProjectAccess': kwargs})
url = 'types/%s/action' % volume_type_id
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.add_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
def remove_type_access(self, volume_type_id, **kwargs):
@@ -188,7 +194,7 @@
post_body = json.dumps({'removeProjectAccess': kwargs})
url = 'types/%s/action' % volume_type_id
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.remove_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
def list_type_access(self, volume_type_id):
@@ -201,5 +207,5 @@
url = 'types/%s/os-volume-type-access' % volume_type_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_type_access, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index db8da84..efdfe8e 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -810,6 +810,42 @@
server_details = cls.os_admin.servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
+ def _get_bdm(self, source_id, source_type, delete_on_termination=False):
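+        # Build a block_device_mapping_v2 entry that boots the server from
+        # the given volume, snapshot or image source as its root disk
+        # (boot_index 0), backed by a volume.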
+ bd_map_v2 = [{
+ 'uuid': source_id,
+ 'source_type': source_type,
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': delete_on_termination}]
+ return {'block_device_mapping_v2': bd_map_v2}
+
+ def boot_instance_from_resource(self, source_id,
+ source_type,
+ keypair=None,
+ security_group=None,
+ delete_on_termination=False,
+ name=None):
+ create_kwargs = dict()
+ if keypair:
+ create_kwargs['key_name'] = keypair['name']
+ if security_group:
+ create_kwargs['security_groups'] = [
+ {'name': security_group['name']}]
+ create_kwargs.update(self._get_bdm(
+ source_id,
+ source_type,
+ delete_on_termination=delete_on_termination))
+ if name:
+ create_kwargs['name'] = name
+
+ return self.create_server(image_id='', **create_kwargs)
+
+ def create_volume_from_image(self):
+ img_uuid = CONF.compute.image_ref
+ vol_name = data_utils.rand_name(
+ self.__class__.__name__ + '-volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
@@ -930,18 +966,21 @@
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
+
+ def _is_active(port):
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state. This has been fixed
+ # with the introduction of the networking-baremetal plugin but
+ # it's not mandatory (and is not used on all stable branches).
+ return (port['status'] == 'ACTIVE' or
+ port.get('binding:vnic_type') == 'baremetal')
+
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
if (netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status)]
+ _is_active(p))]
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
@@ -972,13 +1011,18 @@
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
- result = client.create_floatingip(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing['tenant_id'],
- fixed_ip_address=ip4
- )
+
+ kwargs = {
+ 'floating_network_id': external_network_id,
+ 'port_id': port_id,
+ 'tenant_id': thing['tenant_id'],
+ 'fixed_ip_address': ip4,
+ }
+ if CONF.network.subnet_id:
+ kwargs['subnet_id'] = CONF.network.subnet_id
+ result = client.create_floatingip(**kwargs)
floating_ip = result['floatingip']
+
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_floatingip,
floating_ip['id'])
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index cee543b..4cd860d 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -61,7 +61,11 @@
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
- self.assertEqual(volume, got_volume)
+ # Exclude updated_at because of bug 1838202.
+ excluded_keys = ['updated_at']
+ self.assertThat(
+ volume, custom_matchers.MatchesDictExceptForKeys(
+ got_volume, excluded_keys=excluded_keys))
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index f46c7e8..d8584ec 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -346,10 +346,19 @@
network_id=CONF.network.public_network_id)['subnets']
if s['ip_version'] == 4
]
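+        # When the public network has more than one IPv4 subnet, the subnet
+        # whose gateway should be checked must be selected explicitly via
+        # the [network].subnet_id option.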
- self.assertEqual(1, len(v4_subnets),
- "Found %d IPv4 subnets" % len(v4_subnets))
- external_ips = [v4_subnets[0]['gateway_ip']]
+ if len(v4_subnets) > 1:
+ self.assertTrue(
+ CONF.network.subnet_id,
+ "Found %d subnets. Specify subnet using configuration "
+ "option [network].subnet_id."
+ % len(v4_subnets))
+ subnet = self.os_admin.subnets_client.show_subnet(
+ CONF.network.subnet_id)['subnet']
+ external_ips = [subnet['gateway_ip']]
+ else:
+ external_ips = [v4_subnets[0]['gateway_ip']]
+
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 6ed7e30..0782389 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -31,42 +31,6 @@
# breathing room to get through deletes in the time allotted.
TIMEOUT_SCALING_FACTOR = 2
- def _create_volume_from_image(self):
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _get_bdm(self, source_id, source_type, delete_on_termination=False):
- bd_map_v2 = [{
- 'uuid': source_id,
- 'source_type': source_type,
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': delete_on_termination}]
- return {'block_device_mapping_v2': bd_map_v2}
-
- def _boot_instance_from_resource(self, source_id,
- source_type,
- keypair=None,
- security_group=None,
- delete_on_termination=False,
- name=None):
- create_kwargs = dict()
- if keypair:
- create_kwargs['key_name'] = keypair['name']
- if security_group:
- create_kwargs['security_groups'] = [
- {'name': security_group['name']}]
- create_kwargs.update(self._get_bdm(
- source_id,
- source_type,
- delete_on_termination=delete_on_termination))
- if name:
- create_kwargs['name'] = name
-
- return self.create_server(image_id='', **create_kwargs)
-
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
@@ -104,8 +68,8 @@
# create an instance from volume
LOG.info("Booting instance 1 from volume")
- volume_origin = self._create_volume_from_image()
- instance_1st = self._boot_instance_from_resource(
+ volume_origin = self.create_volume_from_image()
+ instance_1st = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -124,7 +88,7 @@
self._delete_server(instance_1st)
# create a 2nd instance from volume
- instance_2nd = self._boot_instance_from_resource(
+ instance_2nd = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -149,10 +113,10 @@
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
server_from_snapshot = (
- self._boot_instance_from_resource(source_id=volume['id'],
- source_type='volume',
- keypair=keypair,
- security_group=security_group))
+ self.boot_instance_from_resource(source_id=volume['id'],
+ source_type='volume',
+ keypair=keypair,
+ security_group=security_group))
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
@@ -171,13 +135,13 @@
@utils.services('compute', 'image', 'volume')
def test_create_server_from_volume_snapshot(self):
# Create a volume from an image
- boot_volume = self._create_volume_from_image()
+ boot_volume = self.create_volume_from_image()
# Create a snapshot
boot_snapshot = self.create_volume_snapshot(boot_volume['id'])
# Create a server from a volume snapshot
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=boot_snapshot['id'],
source_type='snapshot',
delete_on_termination=True)
@@ -203,16 +167,23 @@
self.assertEqual(created_volume[0]['id'],
created_volume_info['attachments'][0]['volume_id'])
+ # Delete the server and wait
+ self._delete_server(server)
+
+ # Assert that the underlying volume is gone before class tearDown
+ # to prevent snapshot deletion from failing
+ self.volumes_client.wait_for_resource_deletion(created_volume[0]['id'])
+
@decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_image_defined_boot_from_volume(self):
# create an instance from image-backed volume
- volume_origin = self._create_volume_from_image()
+ volume_origin = self.create_volume_from_image()
name = data_utils.rand_name(self.__class__.__name__ +
'-volume-backed-server')
- instance1 = self._boot_instance_from_resource(
+ instance1 = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
delete_on_termination=True,
@@ -288,7 +259,7 @@
self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
# Boot a server from the encrypted volume
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=volume['id'],
source_type='volume',
delete_on_termination=False)
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 7a037eb..b20b60e 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -194,11 +194,14 @@
def get_plugin_load_tests_tuple(self):
load_tests_dict = {}
for plug in self.ext_plugins:
+ LOG.info('Loading tests from Tempest plugin: %s', plug.name)
load_tests_dict[plug.name] = plug.obj.load_tests()
return load_tests_dict
def register_plugin_opts(self, conf):
for plug in self.ext_plugins:
+ LOG.info('Register additional config options from Tempest '
+ 'plugin: %s', plug.name)
try:
plug.obj.register_opts(conf)
except Exception:
@@ -209,6 +212,9 @@
plugin_options = []
for plug in self.ext_plugins:
opt_list = plug.obj.get_opt_lists()
+ LOG.info('List additional config options registered by '
+ 'Tempest plugin: %s', plug.name)
+
if opt_list:
plugin_options.extend(opt_list)
return plugin_options
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index b349bba..a962e37 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -28,7 +28,6 @@
self.os_username = 'fake_user'
self.os_password = 'fake_password'
self.os_project_name = 'fake_project_name'
- self.os_tenant_name = None
self.os_domain_name = 'fake_domain'
self.tag = 'fake'
self.concurrency = 2
@@ -100,15 +99,6 @@
self.assertEqual(self.opts.os_password, admin_creds.password)
self.assertFalse(hasattr(admin_creds, 'domain_name'))
- def test_get_credential_provider_with_tenant(self):
- self.opts.os_project_name = None
- self.opts.os_tenant_name = 'fake_tenant'
- cp = account_generator.get_credential_provider(self.opts)
- admin_creds = cp.default_admin_creds
- self.assertEqual(self.opts.os_tenant_name, admin_creds.tenant_name)
- self.assertEqual(self.opts.os_username, admin_creds.username)
- self.assertEqual(self.opts.os_password, admin_creds.password)
-
class TestAccountGeneratorV3(TestAccountGeneratorV2):
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index ae08d02..8366290 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -196,6 +196,7 @@
is_save_state=is_save_state,
is_preserve=is_preserve,
is_dry_run=is_dry_run,
+ project_id='b8e3ece07bb049138d224436756e3b57',
data={},
saved_state_json=self.saved_state
)
@@ -533,6 +534,135 @@
self._test_saved_state_true([(self.get_method, self.response, 200)])
+class TestVolumeQuotaService(BaseCmdServiceTests):
+
+ service_class = 'VolumeQuotaService'
+ service_name = 'volume_quota_service'
+ response = {
+ "quota_set": {
+ "groups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "per_volume_gigabytes":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "volumes":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "backup_gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "snapshots":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "volumes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "snapshots_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "backups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "id": "b8e3ece07bb049138d224436756e3b57"
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 200),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNovaQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NovaQuotaService'
+ service_name = 'nova_quota_service'
+ response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 128,
+ "maxPersonality": 5,
+ "totalServerGroupsUsed": 0,
+ "maxImageMeta": 128,
+ "maxPersonalitySize": 10240,
+ "maxTotalKeypairs": 100,
+ "maxSecurityGroupRules": 20,
+ "maxServerGroups": 10,
+ "totalCoresUsed": 0,
+ "totalRAMUsed": 0,
+ "totalInstancesUsed": 0,
+ "maxSecurityGroups": 10,
+ "totalFloatingIpsUsed": 0,
+ "maxTotalCores": 20,
+ "maxServerGroupMembers": 10,
+ "maxTotalFloatingIps": 10,
+ "totalSecurityGroupsUsed": 0,
+ "maxTotalInstances": 10,
+ "maxTotalRAMSize": 51200
+ }
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNetworkQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NetworkQuotaService'
+ service_name = 'network_quota_service'
+ response = {
+ "quotas": [{
+ "subnet": 110,
+ "network": 100,
+ "floatingip": 50,
+ "tenant_id": "81e8490db559474dacb2212fca9cca2d",
+ "subnetpool": -1,
+ "security_group_rule": 100,
+ "trunk": -1,
+ "security_group": 10,
+ "router": 10,
+            "rbac_policy": 10,
+            "project_id": "81e8490db559474dacb2212fca9cca2d",
+            "port": 500
+ }]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
# Begin network service classes
class TestNetworkService(BaseCmdServiceTests):
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 8997a4c..e9bbcc2 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -153,6 +153,15 @@
result = ["b\'" + x + "\'" for x in result]
self.assertEqual(result, tests)
+ def test_tempest_run_with_worker_file(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ worker_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(worker_file.close)
+ worker_file.write(
+ '- worker:\n - passing\n concurrency: 3'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--worker-file=%s' % path], 0)
+
def test_tempest_run_with_whitelist(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 6275f22..e3bb836 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -15,6 +15,7 @@
import time
import mock
+from oslo_utils.fixture import uuidsentinel as uuids
from tempest.common import waiters
from tempest import exceptions
@@ -232,3 +233,51 @@
mock_show.assert_has_calls([mock.call(volume_id),
mock.call(volume_id)])
mock_sleep.assert_called_once_with(1)
+
+ def test_wait_for_volume_attachment(self):
+ vol_detached = {'volume': {'attachments': []}}
+ vol_attached = {'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}}
+ show_volume = mock.MagicMock(side_effect=[
+ vol_attached, vol_attached, vol_detached])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=5,
+ show_volume=show_volume)
+ self.patch('time.time')
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is called until the attachment is removed.
+ show_volume.assert_has_calls = [mock.call(uuids.volume_id),
+ mock.call(uuids.volume_id),
+ mock.call(uuids.volume_id)]
+
+ def test_wait_for_volume_attachment_timeout(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ # Assert that a timeout is raised if the attachment remains.
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove,
+ client, uuids.volume_id, uuids.attachment_id)
+
+ def test_wait_for_volume_attachment_not_present(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': []}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is only called once before we return
+ show_volume.assert_called_once_with(uuids.volume_id)
diff --git a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
index 2774c44..8aed7d7 100644
--- a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
@@ -20,78 +20,116 @@
class TestApplicationCredentialsClient(base.BaseServiceTest):
FAKE_CREATE_APP_CRED = {
"application_credential": {
- "description": "fake application credential",
+ "name": "monitoring",
+ "secret": "rEaqvJka48mpv",
+ "description": "Application credential for monitoring.",
+ "expires_at": "2018-02-27T18:30:59Z",
"roles": [
+ {"name": "Reader"}
+ ],
+ "access_rules": [
{
- "id": "c60fdd45",
- "domain_id": None,
- "name": "Member"
+ "path": "/v2.0/metrics",
+ "method": "GET",
+ "service": "monitoring"
}
],
- "expires_at": "2019-02-27T18:30:59.999999Z",
- "secret": "_BVq0xU5L",
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "unrestricted": False
}
}
FAKE_LIST_APP_CREDS = {
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/application_credentials",
+ "previous": None,
+ "next": None
+ },
"application_credentials": [
{
- "description": "fake application credential",
+ "description": "Application credential for backups.",
"roles": [
{
"domain_id": None,
- "name": "Member",
- "id": "c60fdd45",
+ "name": "Writer",
+ "id": "6aff702516544aeca22817fd3bc39683"
}
],
- "expires_at": "2018-02-27T18:30:59.999999Z",
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "access_rules": [
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "308a7e905eee4071aac5971744c061f6"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "308a7e905eee4071aac5971744c061f6",
+ "name": "backups"
},
{
- "description": None,
+ "description": "Application credential for monitoring.",
"roles": [
{
- "id": "0f1837c8",
+ "id": "6aff702516544aeca22817fd3bc39683",
"domain_id": None,
- "name": "anotherrole"
- },
- {
- "id": "c60fdd45",
- "domain_id": None,
- "name": "Member"
+ "name": "Reader"
}
],
- "expires_at": None,
- "unrestricted": None,
- "project_id": "c5403d938",
- "id": "d441c904f",
- "name": "two"
+ "access_rules": [
+ {
+ "path": "/v2.0/metrics",
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "58d61ff8e6e34accb35874016d1dba8b"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "58d61ff8e6e34accb35874016d1dba8b",
+ "name": "monitoring"
}
]
}
FAKE_APP_CRED_INFO = {
"application_credential": {
- "description": None,
+ "description": "Application credential for monitoring.",
"roles": [
{
+ "id": "6aff702516544aeca22817fd3bc39683",
"domain_id": None,
- "name": "Member",
- "id": "c60fdd45",
+ "name": "Reader"
}
],
- "expires_at": None,
- "unrestricted": None,
- "project_id": "ddef321",
- "id": "5499a186",
- "name": "one"
+ "access_rules": [
+ {
+ "path": "/v2.0/metrics",
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ ],
+ "links": {
+ "self": "http://example.com/identity/v3/users/" +
+ "fd786d56402c4d1691372e7dee0d00b5/" +
+ "application_credentials/" +
+ "58d61ff8e6e34accb35874016d1dba8b"
+ },
+ "expires_at": "2018-02-27T18:30:59.000000",
+ "unrestricted": False,
+ "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+ "id": "58d61ff8e6e34accb35874016d1dba8b",
+ "name": "monitoring"
}
}
@@ -118,7 +156,7 @@
self.FAKE_APP_CRED_INFO,
bytes_body,
user_id="123456",
- application_credential_id="5499a186")
+ application_credential_id="58d61ff8e6e34accb35874016d1dba8b")
def _test_list_app_creds(self, bytes_body=False):
self.check_service_client_function(
@@ -152,5 +190,5 @@
'tempest.lib.common.rest_client.RestClient.delete',
{},
user_id="123456",
- application_credential_id="5499a186",
+ application_credential_id="58d61ff8e6e34accb35874016d1dba8b",
status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_groups_client.py b/tempest/tests/lib/services/identity/v3/test_groups_client.py
index 38cf3ae..e3c9851 100644
--- a/tempest/tests/lib/services/identity/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_groups_client.py
@@ -211,3 +211,13 @@
group_id='6e13e2068cf9466e98950595baf6bb35',
user_id='642688fa65a84217b86cef3c063de2b9',
)
+
+ def test_delete_group_user(self):
+ self.check_service_client_function(
+ self.client.delete_group_user,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=204,
+ group_id='6e13e2068cf9466e98950595baf6bb35',
+ user_id='642688fa65a84217b86cef3c063de2b9',
+ )
diff --git a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
index 2faa5be..6b282f4 100644
--- a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
@@ -118,9 +118,17 @@
def test_show_namespace_tag_with_bytes_body(self):
self._test_show_namespace_tag_definition(bytes_body=True)
+ def test_delete_namespace_tag_definition(self):
+ self.check_service_client_function(
+ self.client.delete_namespace_tag,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=204,
+ namespace="OS::Compute::Hypervisor",
+ tag_name="added-sample-tag")
+
def test_delete_all_namespace_tags(self):
self.check_service_client_function(
self.client.delete_namespace_tags,
'tempest.lib.common.rest_client.RestClient.delete',
- {}, status=200,
+ {}, status=204,
namespace="OS::Compute::Hypervisor")
diff --git a/tempest/tests/lib/services/image/v2/test_namespaces_client.py b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
index 3b057ad..db1ffae 100644
--- a/tempest/tests/lib/services/image/v2/test_namespaces_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
@@ -18,16 +18,64 @@
class TestNamespacesClient(base.BaseServiceTest):
- FAKE_CREATE_SHOW_NAMESPACE = {
- "namespace": "OS::Compute::Hypervisor",
- "visibility": "public",
- "description": "Tempest",
- "display_name": u"\u2740(*\xb4\u25e1`*)\u2740",
- "protected": True
+ FAKE_CREATE_NAMESPACE = {
+ "created_at": "2016-05-19T16:05:48Z",
+ "description": "A metadata definitions namespace.",
+ "display_name": "An Example Namespace",
+ "namespace": "FredCo::SomeCategory::Example",
+ "owner": "c60b1d57c5034e0d86902aedf8c49be0",
+ "protected": True,
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/"
+ "FredCo::SomeCategory::Example",
+ "updated_at": "2016-05-19T16:05:48Z",
+ "visibility": "public"
+ }
+
+ FAKE_SHOW_NAMESPACE = {
+ "created_at": "2016-06-28T14:57:10Z",
+ "description": "The libvirt compute driver options.",
+ "display_name": "libvirt Driver Options",
+ "namespace": "OS::Compute::Libvirt",
+ "owner": "admin",
+ "properties": {
+ "boot_menu": {
+ "description": "If true, enables the BIOS bootmenu.",
+ "enum": [
+ "true",
+ "false"
+ ],
+ "title": "Boot Menu",
+ "type": "string"
+ },
+ "serial_port_count": {
+ "description": "Specifies the count of serial ports.",
+ "minimum": 0,
+ "title": "Serial Port Count",
+ "type": "integer"
+ }
+ },
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2016-06-28T14:57:10Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw_"
+ },
+ {
+ "created_at": "2016-06-28T14:57:10Z",
+ "name": "OS::Nova::Flavor",
+ "prefix": "hw:"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt",
+ "visibility": "public"
}
FAKE_LIST_NAMESPACES = {
- "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc",
+ "first": "/v2/metadefs/namespaces?sort_key=created_at&"
+ "sort_dir=asc",
"namespaces": [
{
"created_at": "2014-08-28T17:13:06Z",
@@ -89,7 +137,7 @@
self.check_service_client_function(
self.client.show_namespace,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_CREATE_SHOW_NAMESPACE,
+ self.FAKE_SHOW_NAMESPACE,
bytes_body,
namespace="OS::Compute::Hypervisor")
@@ -104,7 +152,7 @@
self.check_service_client_function(
self.client.create_namespace,
'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_CREATE_SHOW_NAMESPACE,
+ self.FAKE_CREATE_NAMESPACE,
bytes_body,
namespace="OS::Compute::Hypervisor",
visibility="public", description="Tempest",
diff --git a/tempest/tests/lib/services/image/v2/test_resource_types_client.py b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
index 741b4eb..089e62e 100644
--- a/tempest/tests/lib/services/image/v2/test_resource_types_client.py
+++ b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
@@ -48,6 +48,28 @@
]
}
+ FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION = {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw:",
+ "updated_at": "2020-03-07T18:20:44Z"
+ }
+
+ FAKE_LIST_RESOURCE_TYPE_ASSOCIATION = {
+ "resource_type_associations": [
+ {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Nova::Flavor",
+ "prefix": "hw:"
+ },
+ {
+ "created_at": "2020-03-07T18:20:44Z",
+ "name": "OS::Glance::Image",
+ "prefix": "hw_"
+ }
+ ]
+ }
+
def setUp(self):
super(TestResourceTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -62,8 +84,48 @@
self.FAKE_LIST_RESOURCETYPES,
bytes_body)
+ def _test_create_resource_type_association(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION,
+ bytes_body, status=201,
+ namespace_id="OS::Compute::Hypervisor",
+ name="OS::Glance::Image", prefix="hw_",
+ )
+
+ def _test_list_resource_type_association(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_RESOURCE_TYPE_ASSOCIATION,
+ bytes_body,
+ namespace_id="OS::Compute::Hypervisor",
+ )
+
def test_list_resource_types_with_str_body(self):
self._test_list_resource_types()
def test_list_resource_types_with_bytes_body(self):
self._test_list_resource_types(bytes_body=True)
+
+ def test_delete_resource_type_association(self):
+ self.check_service_client_function(
+ self.client.delete_resource_type_association,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=204,
+ namespace_id="OS::Compute::Hypervisor",
+ resource_name="OS::Glance::Image",
+ )
+
+ def test_create_resource_type_association_with_str_body(self):
+ self._test_create_resource_type_association()
+
+ def test_create_resource_type_association_with_bytes_body(self):
+ self._test_create_resource_type_association(bytes_body=True)
+
+ def test_list_resource_type_association_with_str_body(self):
+ self._test_list_resource_type_association()
+
+ def test_list_resource_type_association_with_bytes_body(self):
+ self._test_list_resource_type_association(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_agents_client.py b/tempest/tests/lib/services/network/test_agents_client.py
index aabc6ce..8904882 100644
--- a/tempest/tests/lib/services/network/test_agents_client.py
+++ b/tempest/tests/lib/services/network/test_agents_client.py
@@ -22,12 +22,145 @@
FAKE_AGENT_ID = "d32019d3-bc6e-4319-9c1d-6123f4135a88"
+ FAKE_LIST_DATA = {
+ "agents": [
+ {
+ "binary": "neutron-dhcp-agent",
+ "description": None,
+ "availability_zone": "nova",
+ "heartbeat_timestamp": "2017-09-12 19:39:56",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "840d5d68-5759-4e9e-812f",
+ "topic": "dhcp_agent",
+ "host": "agenthost1",
+ "agent_type": "DHCP agent",
+ "started_at": "2017-09-12 19:35:36",
+ "created_at": "2017-09-12 19:35:36",
+ "resources_synced": None,
+ "configurations": {
+ "subnets": 2,
+ "dhcp_lease_duration": 86400,
+ "dhcp_driver": "neutron.agent",
+ "networks": 1,
+ "log_agent_heartbeats": False,
+ "ports": 3
+ }
+ }
+ ]
+ }
+
+ FAKE_SHOW_DATA = {
+ "agent": {
+ "binary": "neutron-openvswitch-agent",
+ "description": None,
+ "availability_zone": None,
+ "heartbeat_timestamp": "2017-09-12 19:40:38",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "04c62b91-b799-48b7-9cd5-2982db6df9c6",
+ "topic": "N/A",
+ "host": "agenthost1",
+ "agent_type": "Open vSwitch agent",
+ "started_at": "2017-09-12 19:35:38",
+ "created_at": "2017-09-12 19:35:38",
+ "resources_synced": True,
+ "configurations": {
+ "ovs_hybrid_plug": True,
+ "in_distributed_mode": False,
+ "datapath_type": "system",
+ "vhostuser_socket_dir": "/var/run/openvswitch",
+ "tunneling_ip": "172.16.78.191",
+ "arp_responder_enabled": False,
+ "devices": 0,
+ "ovs_capabilities": {
+ "datapath_types": [
+ "netdev",
+ "system"
+ ],
+ "iface_types": [
+ "geneve",
+ "gre",
+ "internal",
+ "ipsec_gre",
+ "lisp",
+ "patch",
+ "stt",
+ "system",
+ "tap",
+ "vxlan"
+ ]
+ },
+ "log_agent_heartbeats": False,
+ "l2_population": False,
+ "tunnel_types": [
+ "vxlan"
+ ],
+ "extensions": [],
+ "enable_distributed_routing": False,
+ "bridge_mappings": {
+ "public": "br-ex"
+ }
+ }
+ }
+ }
+
+ FAKE_UPDATE_DATA = {
+ "agent": {
+ "description": "My OVS agent for OpenStack"
+ }
+ }
+
def setUp(self):
super(TestAgentsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.agents_client = agents_client.AgentsClient(
fake_auth, "network", "regionOne")
+ def _test_show_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.show_agent,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_SHOW_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_update_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.update_agent,
+ "tempest.lib.common.rest_client.RestClient.put",
+ self.FAKE_UPDATE_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_list_agents(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.list_agents,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_LIST_DATA,
+ bytes_body,
+ status=200)
+
+ def test_show_agent_with_str_body(self):
+ self._test_show_agent()
+
+ def test_show_agent_with_bytes_body(self):
+ self._test_show_agent(bytes_body=True)
+
+ def test_update_agent_with_str_body(self):
+ self._test_update_agent()
+
+ def test_update_agent_with_bytes_body(self):
+ self._test_update_agent(bytes_body=True)
+
+ def test_list_agent_with_str_body(self):
+ self._test_list_agents()
+
+ def test_list_agent_with_bytes_body(self):
+ self._test_list_agents(bytes_body=True)
+
def test_delete_agent(self):
self.check_service_client_function(
self.agents_client.delete_agent,
diff --git a/tempest/tests/lib/services/network/test_routers_client.py b/tempest/tests/lib/services/network/test_routers_client.py
index 2fa5993..f5dcc7d 100644
--- a/tempest/tests/lib/services/network/test_routers_client.py
+++ b/tempest/tests/lib/services/network/test_routers_client.py
@@ -20,37 +20,78 @@
class TestRoutersClient(base.BaseServiceTest):
FAKE_CREATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": True,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "routes": [],
+ "revision_number": 1,
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
FAKE_UPDATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": False,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "revision_number": 3,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
index a16d1d7..1749b03 100644
--- a/tempest/tests/lib/services/object_storage/test_object_client.py
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -69,7 +69,7 @@
# If the expected initial status is not 100, then an exception
# should be thrown and the connection closed
- if initial_status is 100:
+ if initial_status == 100:
status, reason = \
self.object_client.create_object_continue(cnt, obj, req_data)
else:
@@ -91,7 +91,7 @@
mock_poc.return_value.endheaders.assert_called_once_with()
# The following steps are only taken if the initial status is 100
- if initial_status is 100:
+ if initial_status == 100:
# Verify that the method returned what it was supposed to
self.assertEqual(status, 201)
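The two one-character changes above replace an identity test with an equality test. A short illustration of why that matters (plain Python, no tempest specifics):

    # Equality vs. identity, the bug fixed in this hunk.
    status = 100
    assert status == 100   # value comparison; always the right check here

    # "status is 100" asks whether both names refer to the same int object.
    # CPython caches small integers (-5..256), so the old identity check
    # usually passed by accident; that is an implementation detail, and
    # Python 3.8+ emits "SyntaxWarning: 'is' with a literal" for it.
    parsed = int("100")    # equal to 100; whether it is the *same* object
    assert parsed == 100   # is not something the language guarantees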
diff --git a/tempest/tests/lib/services/volume/v3/test_attachments_client.py b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
new file mode 100644
index 0000000..52c94e5
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import attachments_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
+
+class TestAttachmentsClient(base.BaseServiceTest):
+
+ FAKE_ATTACHMENT_INFO = {
+ "attachment": {
+ "status": "attaching",
+ "detached_at": "2015-09-16T09:28:52.000000",
+ "connection_info": {},
+ "attached_at": "2015-09-16T09:28:52.000000",
+ "attach_mode": "ro",
+ "instance": uuids.instance_id,
+ "volume_id": uuids.volume_id,
+ "id": uuids.id,
+ }
+ }
+
+ def setUp(self):
+ super(TestAttachmentsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = attachments_client.AttachmentsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def test_show_attachment(self):
+ self.check_service_client_function(
+ self.client.show_attachment,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ATTACHMENT_INFO, attachment_id=uuids.id)
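The new attachments test leans on oslo.utils' uuidsentinel fixture (imported above) instead of hard-coded IDs. A quick illustration of its behaviour, assuming a recent oslo.utils release that provides the fixture:

    from oslo_utils.fixture import uuidsentinel as uuids

    # Attributes are generated on first access and then cached, so the fake
    # response body and the request kwargs can share an ID without
    # hard-coding it anywhere.
    print(uuids.volume_id)                         # a valid UUID string
    assert uuids.volume_id == uuids.volume_id      # stable for the same name
    assert uuids.volume_id != uuids.instance_id    # distinct names differ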
diff --git a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
index 2efd2e6..1ea4c65 100644
--- a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
@@ -20,61 +20,85 @@
class TestSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_SNAPSHOT = {
"snapshot": {
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "force": True
+ "created_at": "2019-03-11T16:24:34.469003",
+ "description": "Daily backup",
+ "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e"
}
}
- FAKE_UPDATE_SNAPSHOT_REQUEST = {
- "metadata": {
- "key": "v1"
+ FAKE_UPDATE_SNAPSHOT_RESPONSE = {
+ "snapshot": {
+ "created_at": "2019-03-12T04:53:53.426591",
+ "description": "This is yet, another snapshot.",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-002",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "070c942d-9909-42e9-a467-7a781f150c58"
}
}
FAKE_INFO_SNAPSHOT = {
"snapshot": {
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z"
+ "created_at": "2019-03-12T04:42:00.809352",
+ "description": "Daily backup",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37"
}
}
FAKE_LIST_SNAPSHOTS = {
"snapshots": [
{
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z",
+ "created_at": "2019-03-11T16:24:36.464445",
+ "description": "Daily backup",
+ "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
- "contents": "junk"
- }
- },
- {
- "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
- "display_name": "snap-002",
- "display_description": "Weekly backup",
- "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
- "status": "available",
- "size": 25,
- "created_at": "2012-03-19T01:52:47Z",
- "metadata": {}
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6"
}
]
}
FAKE_SNAPSHOT_METADATA_ITEM = {
+ "metadata": {
+ "key": "value"
+ }
+ }
+
+ FAKE_SNAPSHOT_KEY = {
"meta": {
- "key1": "value1"
+ "key": "new_value"
}
}
@@ -99,7 +123,7 @@
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_INFO_SNAPSHOT,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_list_snapshots(self, bytes_body=False):
self.check_service_client_function(
@@ -113,48 +137,48 @@
self.check_service_client_function(
self.client.create_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- metadata={"key": "v1"})
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ metadata={"key": "value"})
def _test_update_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_UPDATE_SNAPSHOT_RESPONSE,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_show_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
- bytes_body, snapshot_id="cbc36478b0bd8e67e89")
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
+ bytes_body, snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
def _test_show_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_SNAPSHOT_METADATA_ITEM,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
id="key1")
def test_create_snapshot_with_str_body(self):
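The snapshot fixture split above mirrors the two envelopes the Block Storage metadata calls use: collection operations wrap the whole map under "metadata" (FAKE_SNAPSHOT_METADATA_ITEM), while single-key operations wrap one pair under "meta" (FAKE_SNAPSHOT_KEY). A tiny sketch with hypothetical helpers, just to make the distinction concrete:

    # Hypothetical unwrapping helpers; only the envelope key differs.
    def unwrap_metadata(body):
        # .../snapshots/{id}/metadata -> whole key/value map
        return body["metadata"]

    def unwrap_metadata_item(body):
        # .../snapshots/{id}/metadata/{key} -> single pair
        return body["meta"]

    assert unwrap_metadata(
        {"metadata": {"key": "value"}}) == {"key": "value"}
    assert unwrap_metadata_item(
        {"meta": {"key": "new_value"}}) == {"key": "new_value"}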
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 83c1abb..7c31185 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -101,17 +101,6 @@
'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
"\n"))
- def test_no_vi_headers(self):
- # NOTE(mtreinish) The lines parameter is used only for finding the
- # line location in the file. So these tests just pass a list of an
- # arbitrary length to use for verifying the check function.
- self.assertTrue(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
- self.assertTrue(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
- self.assertFalse(checks.no_vi_headers(
- '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
-
def test_service_tags_not_in_module_path(self):
self.assertTrue(checks.service_tags_not_in_module_path(
"@utils.services('compute')",
diff --git a/test-requirements.txt b/test-requirements.txt
index 196387c..17a7d2a 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,8 +1,9 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-hacking>=1.1.0,<1.2.0 # Apache-2.0
+hacking>=3.0.1,<3.1.0;python_version>='3.5' # Apache-2.0
mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
+pycodestyle>=2.0.0,<2.6.0 # MIT
flake8-import-order==0.11 # LGPLv3
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 64adcbe..5ffef3e 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -36,6 +36,7 @@
# when the patches are merged.
BLACKLIST = [
'x/gce-api', # It looks gce-api doesn't support python3 yet.
+ 'x/glare', # To avoid sanity-job failure
'x/group-based-policy', # It looks this doesn't support python3 yet.
'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
'openstack/networking-generic-switch',
@@ -48,7 +49,9 @@
'openstack/neutron-dynamic-routing',
# https://review.opendev.org/#/c/637718/
'openstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
+ 'x/tap-as-a-service', # To avoid sanity-job failure
'x/valet', # https://review.opendev.org/#/c/638339/
+ 'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
]
url = 'https://review.opendev.org/projects/'
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index 961cd09..33675ed 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -98,8 +98,8 @@
if [[ -r doc/source/data/tempest-plugins-registry.footer ]]; then
cat doc/source/data/tempest-plugins-registry.footer
fi
-) > doc/source/plugin-registry.rst
+) > doc/source/plugins/plugin-registry.rst
if [[ -n ${1} ]]; then
- cp doc/source/plugin-registry.rst ${1}/doc/source/plugin-registry.rst
+ cp doc/source/plugins/plugin-registry.rst ${1}/doc/source/plugins/plugin-registry.rst
fi
diff --git a/tools/tempest-integrated-gate-object-storage-blacklist.txt b/tools/tempest-integrated-gate-object-storage-blacklist.txt
index 064cf46..c164343 100644
--- a/tools/tempest-integrated-gate-object-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-object-storage-blacklist.txt
@@ -9,9 +9,10 @@
tempest.api.identity
# Skip network, compute, keystone only scenario tests
-tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_network_advanced_server_ops
-tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
-tempest.scenario.test_network_v6.TestGettingAddress.test_security_groups_basic_ops
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_server_sequence_suspend_resume
tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
index 3900f96..1ef6bb5 100644
--- a/tools/tempest-integrated-gate-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -8,6 +8,7 @@
tempest.api.identity
# Skip network only scenario tests.
-tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_network_advanced_server_ops
-tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
-tempest.scenario.test_network_v6.TestGettingAddress.test_security_groups_basic_ops
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index b484a41..2ff4aea 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -60,8 +60,8 @@
fi
}
-: ${UPPER_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
-DEPS="-c${UPPER_CONSTRAINTS_FILE}"
+: ${TOX_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
+DEPS="-c${TOX_CONSTRAINTS_FILE}"
# function to create virtualenv to perform sanity operation
function prepare_workspace {
diff --git a/tox.ini b/tox.ini
index ff31fe8..0477d6f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,11 @@
[tox]
-envlist = pep8,py36,py37,py27,bashate,pip-check-reqs
+envlist = pep8,py36,py38,bashate,pip-check-reqs
minversion = 3.1.1
skipsdist = True
ignore_basepython_conflict = True
[tempestenv]
-basepython = python3.6
+basepython = python3
sitepackages = False
setenv =
VIRTUAL_ENV={envdir}
@@ -313,7 +313,6 @@
check-uuid --fix
[hacking]
-local-check-factory = tempest.hacking.checks.factory
import_exceptions = tempest.services
[flake8]
@@ -327,6 +326,26 @@
enable-extensions = H106,H203,H904
import-order-style = pep8
+[flake8:local-plugins]
+extension =
+ T102 = checks:import_no_clients_in_api_and_scenario_tests
+ T104 = checks:scenario_tests_need_service_tags
+ T105 = checks:no_setup_teardown_class_for_tests
+ T107 = checks:service_tags_not_in_module_path
+ T108 = checks:no_hyphen_at_end_of_rand_name
+ N322 = checks:no_mutable_default_args
+ T109 = checks:no_testtools_skip_decorator
+ T110 = checks:get_resources_on_service_clients
+ T111 = checks:delete_resources_on_service_clients
+ T112 = checks:dont_import_local_tempest_into_lib
+ T113 = checks:use_rand_uuid_instead_of_uuid4
+ T114 = checks:dont_use_config_in_tempest_lib
+ T115 = checks:dont_put_admin_tests_on_nonadmin_path
+ T116 = checks:unsupported_exception_attribute_PY3
+ T117 = checks:negative_test_attribute_always_applied_to_negative_tests
+paths =
+ ./tempest/hacking
+
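This block replaces the removed local-check-factory hook: each "CODE = checks:function" entry maps a flake8 error code directly to a check function, and "paths" tells flake8 where to find the module. A hedged sketch of what such a check looks like on the Python side; the T999 code and the check body are invented for illustration, and the core.flake8ext decorator import is an assumption about how hacking >= 2.0 registers extensions:

    # tempest/hacking/checks.py-style sketch (illustrative only).
    from hacking import core

    @core.flake8ext
    def no_print_statements(logical_line):
        """T999 - use logging instead of print() in tests."""
        if logical_line.lstrip().startswith("print("):
            yield (0, "T999: use logging instead of print()")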
[testenv:releasenotes]
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}