Merge "Port tempest-scenario-all to zuul v3"
diff --git a/HACKING.rst b/HACKING.rst
index 1c084f8..2a7ae1d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -106,7 +106,7 @@
test method. You specify the services with the ``tempest.common.utils.services``
decorator. For example:
-@utils.services('compute', 'image')
+``@utils.services('compute', 'image')``
Valid service tag names are the same as the list of directories in tempest.api
that have tests.
@@ -118,6 +118,59 @@
in ``tempest.api.compute`` would require a service tag for those services,
however they do not need to be tagged as ``compute``.
+Test Attributes
+---------------
+Tempest leverages `test attributes`_ which are a simple but effective way of
+distinguishing between different "types" of API tests. A test can be "tagged"
+with such attributes using the ``decorators.attr`` decorator, for example::
+
+ @decorators.attr(type=['negative'])
+ def test_aggregate_create_aggregate_name_length_less_than_1(self):
+ [...]
+
+These test attributes are embedded in each test's ID (inside square brackets)
+and can therefore be used for test selection via regular expressions.
+For example, ``(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)`` runs all the tests
+in the ``scenario`` test module, *except* for those tagged with the ``slow``
+attribute (via a negative lookahead in the regular expression). These
+attributes are used in Tempest's ``tox.ini`` as well as Tempest's Zuul job
+definitions for specifying particular batches of Tempest test suites to run.
+
+.. _test attributes: https://testtools.readthedocs.io/en/latest/for-test-authors.html?highlight=attr#test-attributes
+
+Negative Attribute
+^^^^^^^^^^^^^^^^^^
+The ``type='negative'`` attribute is used to signify that a test is a negative
+test, i.e. a test that verifies the API handles invalid input gracefully.
+This attribute should be applied to all negative test scenarios.
+
+This attribute must be applied to each test that belongs to a negative test
+class, i.e. a test class whose name contains the "Negative" substring.
+
+.. todo::
+
+ Add a hacking check for ensuring that all classes that contain the substring
+ "Negative" have the negative attribute decorator applied above each test.
+
+Slow Attribute
+^^^^^^^^^^^^^^
+The ``type='slow'`` attribute is used to signify that a test takes a long time
+to run, relatively speaking. This attribute is usually applied to
+:ref:`scenario tests <scenario_field_guide>`, which involve a complicated
+series of API operations, the total runtime of which can be relatively long.
+This long runtime has performance implications on `Zuul`_ jobs, which is why
+the ``slow`` attribute is leveraged to run slow tests on a selective basis,
+to keep total `Zuul`_ job runtime down to a reasonable time frame.
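+
+For instance, a slow scenario test might be tagged as follows (the test name
+and tagged services here are purely illustrative)::
+
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_complex_multi_service_workflow(self):
+        [...]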
+
+.. _Zuul: https://docs.openstack.org/infra/zuul/
+
+Smoke Attribute
+^^^^^^^^^^^^^^^
+The ``type='smoke'`` attribute is used to signify that a test is a so-called
+smoke test, which is a type of test that exercises the most vital OpenStack
+functionality, such as listing servers or flavors or creating volumes. The
+attribute should be sparingly applied to only the tests that sanity-check the
+most essential functionality of an OpenStack cloud.
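+
+A minimal illustration (the test name and service tag below are hypothetical)::
+
+    @decorators.attr(type='smoke')
+    @utils.services('volume')
+    def test_volume_create_get_delete(self):
+        [...]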
+
Test fixtures and resources
---------------------------
Test level resources should be cleaned-up after the test execution. Clean-up
@@ -419,34 +472,3 @@
tested is considered stable and adheres to the OpenStack API stability
guidelines. If an API is still considered experimental or in development then
it should not be tested by Tempest until it is considered stable.
-
-Stable Support Policy
----------------------
-
-Since the `Extended Maintenance policy`_ for stable branches was adopted
-OpenStack projects will keep stable branches around after a "stable" or
-"maintained" period for a phase of indeterminate length called "Extended
-Maintenance". Prior to this resolution Tempest supported all stable branches
-which were supported upstream. This policy does not scale under the new model
-as Tempest would be responsible for gating proposed changes against an ever
-increasing number of branches. Therefore due to resource constraints, Tempest
-will only provide support for branches in the "Maintained" phase from the
-documented `Support Phases`_. When a branch moves from the *Maintained* to the
-*Extended Maintenance* phase, Tempest will tag the removal of support for that
-branch as it has in the past when a branch goes end of life.
-
-The expectation for *Extended Maintenance* phase branches is that they will continue
-running Tempest during that phase of support. Since the REST APIs are stable
-interfaces across release boundaries, branches in these phases should run
-Tempest from master as long as possible. But, because we won't be actively
-testing branches in these phases, it's possible that we'll introduce changes to
-Tempest on master which will break support on *Extended Maintenance* phase
-branches. When this happens the expectation for those branches is to either
-switch to running Tempest from a tag with support for the branch, or blacklist
-a newly introduced test (if that is the cause of the issue). Tempest will not
-be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
-project, to support that branch.
-
-.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
-.. _Support Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
diff --git a/REVIEWING.rst b/REVIEWING.rst
index a880181..8a1e152 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -99,6 +99,39 @@
scenario tests this is up to the reviewers discretion whether a docstring is
required or not.
+
+Test Removal and Refactoring
+----------------------------
+Make sure that any test that is renamed, relocated (e.g. moved to another
+class), or removed does not belong to the `interop`_ testing suite -- which
+includes a select suite of Tempest tests for the purposes of validating that
+OpenStack vendor clouds are interoperable -- or a project's `whitelist`_ or
+`blacklist`_ files.
+
+It is of critical importance that no interop, whitelist or blacklist test
+reference be broken by a patch set introduced to Tempest that renames,
+relocates or removes a referenced test.
+
+Please check the existence of code which references Tempest tests with:
+http://codesearch.openstack.org/
+
+Interop
+^^^^^^^
+Make sure that modifications to an `interop`_ test are backwards-compatible.
+This means that code modifications to tests should not undermine the quality of
+the validation currently performed by the test or significantly alter the
+behavior of the test.
+
+Removal
+^^^^^^^
+Reference the :ref:`test-removal` guidelines for understanding best practices
+associated with test removal.
+
+.. _interop: https://www.openstack.org/brand/interop
+.. _whitelist: https://docs.openstack.org/tempest/latest/run.html#test-selection
+.. _blacklist: https://docs.openstack.org/tempest/latest/run.html#test-selection
+
+
Release Notes
-------------
Release notes are how we indicate to users and other consumers of Tempest what
@@ -113,16 +146,18 @@
.. _reno: https://docs.openstack.org/reno/latest/
+
Deprecated Code
---------------
Sometimes we have some bugs in deprecated code. Basically, we leave it. Because
we don't need to maintain it. However, if the bug is critical, we might need to
fix it. When it will happen, we will deal with it on a case-by-case basis.
+
When to approve
---------------
* Every patch needs two +2s before being approved.
-* Its ok to hold off on an approval until a subject matter expert reviews it
+* It's ok to hold off on an approval until a subject matter expert reviews it
* If a patch has already been approved but requires a trivial rebase to merge,
you do not have to wait for a second +2, since the patch has already had
two +2s.
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index d0d7320..2e5f706 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -172,7 +172,7 @@
resize test).
Using a smaller flavor is generally recommended. When larger flavors are used,
-the extra time required to bring up servers will likely affect total run time
+the extra time required to bring up servers will likely affect the total run time
and probably require tweaking timeout values to ensure tests have ample time to
finish.
@@ -207,7 +207,7 @@
The behavior of these options is a bit convoluted (which will likely be fixed in
future versions). You first need to specify ``img_dir``, which is the directory
-in which Tempest will look for the image files. First it will check if the
+in which Tempest will look for the image files. First, it will check if the
filename set for ``img_file`` could be found in ``img_dir``. If it is found then
the ``img_container_format`` and ``img_disk_format`` options are used to upload
that image to glance. However, if it is not found, Tempest will look for the
@@ -239,7 +239,7 @@
""""""""""""""""""""""""""""""""""
When Tempest creates servers for testing, some tests require being able to
connect those servers. Depending on the configuration of the cloud, the methods
-for doing this can be different. In certain configurations it is required to
+for doing this can be different. In certain configurations, it is required to
specify a single network with server create calls. Accordingly, Tempest provides
a few different methods for providing this information in configuration to try
and ensure that regardless of the cloud's configuration it'll still be able to
@@ -297,10 +297,10 @@
''''''''''''''''''''''''
With dynamic credentials enabled and using nova-network, your only option for
configuration is to either set a fixed network name or not. However, in most
-cases it shouldn't matter because nova-network should have no problem booting a
+cases, it shouldn't matter because nova-network should have no problem booting a
server with multiple networks. If this is not the case for your cloud then using
an accounts file is recommended because it provides the necessary flexibility to
-describe your configuration. Dynamic credentials is not able to dynamically
+describe your configuration. Dynamic credentials are not able to dynamically
allocate things as necessary if Neutron is not enabled.
With Neutron and dynamic credentials enabled there should not be any additional
@@ -352,7 +352,7 @@
OpenStack is really a constellation of several different projects which
are running together to create a cloud. However which projects you're running
is not set in stone, and which services are running is up to the deployer.
-Tempest however needs to know which services are available so it can figure
+Tempest, however, needs to know which services are available so it can figure
out which tests it is able to run and certain setup steps which differ based
on the available services.
@@ -390,8 +390,8 @@
.. note::
- Tempest does not serve all kinds of fancy URLs in the service catalog. The
- service catalog should be in a standard format (which is going to be
+ Tempest does not serve all kinds of fancy URLs in the service catalog.
+ The service catalog should be in a standard format (which is going to be
standardized at the Keystone level).
Tempest expects URLs in the Service catalog in the following format:
@@ -413,10 +413,10 @@
certain operations and features aren't supported depending on the configuration.
These features may or may not be discoverable from the API so the burden is
often on the user to figure out what is supported by the cloud they're talking
-to. Besides the obvious interoperability issues with this it also leaves
+to. Besides the obvious interoperability issues with this, it also leaves
Tempest in an interesting situation trying to figure out which tests are
expected to work. However, Tempest tests do not rely on dynamic API discovery
-for a feature (assuming one exists). Instead Tempest has to be explicitly
+for a feature (assuming one exists). Instead, Tempest has to be explicitly
configured as to which optional features are enabled. This is in order to
prevent bugs in the discovery mechanisms from masking failures.
@@ -432,8 +432,8 @@
^^^^^^^^^^^^^^
The service feature-enabled sections often contain an ``api-extensions`` option
(or in the case of Swift a ``discoverable_apis`` option). This is used to tell
-Tempest which api extensions (or configurable middleware) is used in your
+Tempest which API extensions (or configurable middleware) is used in your
deployment. It has two valid config states: either it contains a single value
-``all`` (which is the default) which means that every api extension is assumed
+``all`` (which is the default) which means that every API extension is assumed
to be enabled, or it is set to a list of each individual extension that is
enabled for that service.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f562850..fecf98a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -80,6 +80,14 @@
library
+Support Policy
+--------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_support_policy
+
Indices and tables
==================
diff --git a/doc/source/library.rst b/doc/source/library.rst
index 14415ae..6a12c45 100644
--- a/doc/source/library.rst
+++ b/doc/source/library.rst
@@ -4,12 +4,12 @@
=============================
Tempest provides a stable library interface that provides external tools or
-test suites an interface for reusing pieces of tempest code. Any public
-interface that lives in tempest/lib in the tempest repo is treated as a stable
+test suites an interface for reusing pieces of Tempest code. Any public
+interface that lives in tempest/lib in the Tempest repo is treated as a stable
public interface and it should be safe to external consume that. Every effort
goes into maintaining backwards compatibility with any change.
The library is self contained and doesn't have any dependency on
-other tempest internals outside of lib (including no usage of tempest
+other Tempest internals outside of lib (including no usage of Tempest
configuration).
Stability
@@ -32,7 +32,7 @@
Making changes
''''''''''''''
When making changes to tempest/lib you have to be conscious of the effect of
-any changes on external consumers. If your proposed changeset will change the
+any changes on external consumers. If your proposed change set will change the
default behaviour of any interface, or make something which previously worked
not after your change, then it is not acceptable. Every effort needs to go into
preserving backwards compatibility in changes.
@@ -40,8 +40,8 @@
Reviewing
'''''''''
When reviewing a proposed change to tempest/lib code we need to be careful to
-ensure that we don't break backwards compatibility. For patches that change
-existing interfaces we have to be careful to make sure we don't break any
+ensure that we don't break backward compatibility. For patches that change
+existing interfaces, we have to be careful to make sure we don't break any
external consumers. Some common red flags are:
* a change to an existing API requires a change outside the library directory
@@ -52,7 +52,7 @@
'''''''
When adding a new interface to the library we need to at a minimum have unit
test coverage. A proposed change to add an interface to tempest/lib that
-doesn't have unit tests shouldn't be accepted. Ideally these unit tests will
+doesn't have unit tests shouldn't be accepted. Ideally, these unit tests will
provide sufficient coverage to ensure a stable interface moving forward.
Current Library APIs
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index ea868ae..6dd00d3 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -358,22 +358,30 @@
.. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+ * `2.53`_
+
+ .. _2.53: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-pike
+
* `2.54`_
- .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id4
+ .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
* `2.55`_
- .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+ .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
* `2.57`_
- .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
+ .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
* `2.60`_
.. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-queens
+ * `2.61`_
+
+ .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+
* `2.63`_
.. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 6f6621d..9958792 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -5,24 +5,24 @@
=============================
Tempest has an external test plugin interface which enables anyone to integrate
-an external test suite as part of a tempest run. This will let any project
-leverage being run with the rest of the tempest suite while not requiring the
-tests live in the tempest tree.
+an external test suite as part of a Tempest run. This will let any project
+leverage being run with the rest of the Tempest suite while not requiring the
+tests live in the Tempest tree.
Creating a plugin
=================
Creating a plugin is fairly straightforward and doesn't require much additional
effort on top of creating a test suite using tempest.lib. One thing to note with
-doing this is that the interfaces exposed by tempest are not considered stable
-(with the exception of configuration variables which ever effort goes into
-ensuring backwards compatibility). You should not need to import anything from
-tempest itself except where explicitly noted.
+doing this is that the interfaces exposed by Tempest are not considered stable
+(with the exception of configuration variables, for which every effort goes into
+ensuring backward compatibility). You should not need to import anything from
+Tempest itself except where explicitly noted.
Stable Tempest APIs plugins may use
-----------------------------------
-As noted above, several tempest APIs are acceptable to use from plugins, while
+As noted above, several Tempest APIs are acceptable to use from plugins, while
others are not. A list of stable APIs available to plugins is provided below:
* tempest.lib.*
@@ -32,7 +32,7 @@
* tempest.clients
* tempest.test
-If there is an interface from tempest that you need to rely on in your plugin
+If there is an interface from Tempest that you need to rely on in your plugin
which is not listed above, it likely needs to be migrated to tempest.lib. In
that situation, file a bug, push a migration patch, etc. to expedite providing
the interface in a reliable manner.
@@ -62,7 +62,7 @@
-----------
Once you've created your plugin class you need to add an entry point to your
-project to enable tempest to find the plugin. The entry point must be added
+project to enable Tempest to find the plugin. The entry point must be added
to the "tempest.test_plugins" namespace.
If you are using pbr this is fairly straightforward, in the setup.cfg just add
@@ -77,9 +77,9 @@
Standalone Plugin vs In-repo Plugin
-----------------------------------
-Since all that's required for a plugin to be detected by tempest is a valid
+Since all that's required for a plugin to be detected by Tempest is a valid
setuptools entry point in the proper namespace there is no difference from the
-tempest perspective on either creating a separate python package to
+Tempest perspective on either creating a separate python package to
house the plugin or adding the code to an existing python project. However,
there are tradeoffs to consider when deciding which approach to take when
creating a new plugin.
@@ -91,9 +91,9 @@
single version of the test code across project release boundaries (see the
`Branchless Tempest Spec`_ for more details on this). It also greatly
simplifies the install time story for external users. Instead of having to
-install the right version of a project in the same python namespace as tempest
+install the right version of a project in the same python namespace as Tempest
they simply need to pip install the plugin in that namespace. It also means
-that users don't have to worry about inadvertently installing a tempest plugin
+that users don't have to worry about inadvertently installing a Tempest plugin
when they install another package.
.. _Branchless Tempest Spec: http://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
@@ -108,9 +108,9 @@
Plugin Class
============
-To provide tempest with all the required information it needs to be able to run
-your plugin you need to create a plugin class which tempest will load and call
-to get information when it needs. To simplify creating this tempest provides an
+To provide Tempest with all the required information it needs to be able to run
+your plugin you need to create a plugin class which Tempest will load and call
+to get information when it needs it. To simplify creating this, Tempest provides an
abstract class that should be used as the parent for your plugin. To use this
you would do something like the following:
@@ -147,7 +147,7 @@
services/
client.py
-That will mirror what people expect from tempest. The file
+That will mirror what people expect from Tempest. The files are as follows:
* **config.py**: contains any plugin specific configuration variables
* **plugin.py**: contains the plugin class used for the entry point
@@ -156,14 +156,14 @@
* **services**: where the plugin specific service clients are
Additionally, when you're creating the plugin you likely want to follow all
-of the tempest developer and reviewer documentation to ensure that the tests
-being added in the plugin act and behave like the rest of tempest.
+of the Tempest developer and reviewer documentation to ensure that the tests
+being added in the plugin act and behave like the rest of Tempest.
Dealing with configuration options
----------------------------------
-Historically Tempest didn't provide external guarantees on its configuration
-options. However, with the introduction of the plugin interface this is no
+Historically, Tempest didn't provide external guarantees on its configuration
+options. However, with the introduction of the plugin interface, this is no
longer the case. An external plugin can rely on using any configuration option
coming from Tempest, there will be at least a full deprecation cycle for any
option before it's removed. However, just the options provided by Tempest
@@ -171,7 +171,7 @@
configuration options you should use the ``register_opts`` and
``get_opt_lists`` methods to pass them to Tempest when the plugin is loaded.
When adding configuration options the ``register_opts`` method gets passed the
-CONF object from tempest. This enables the plugin to add options to both
+CONF object from Tempest. This enables the plugin to add options to both
existing sections and also create new configuration sections for new options.
Service Clients
@@ -325,23 +325,23 @@
Tempest will automatically discover any installed plugins when it is run. So by
just installing the python packages which contain your plugin you'll be using
-them with tempest, nothing else is really required.
+them with Tempest, nothing else is really required.
However, you should take care when installing plugins. By their very nature
-there are no guarantees when running tempest with plugins enabled about the
+there are no guarantees when running Tempest with plugins enabled about the
quality of the plugin. Additionally, while there is no limitation on running
-with multiple plugins it's worth noting that poorly written plugins might not
+with multiple plugins, it's worth noting that poorly written plugins might not
properly isolate their tests which could cause unexpected cross interactions
between plugins.
Notes for using plugins with virtualenvs
----------------------------------------
-When using a tempest inside a virtualenv (like when running under tox) you have
+When using Tempest inside a virtualenv (like when running under tox) you have
to ensure that the package that contains your plugin is either installed in the
venv too or that you have system site-packages enabled. The virtualenv will
-isolate the tempest install from the rest of your system so just installing the
-plugin package on your system and then running tempest inside a venv will not
+isolate the Tempest install from the rest of your system so just installing the
+plugin package on your system and then running Tempest inside a venv will not
work.
Tempest also exposes a tox job, all-plugin, which will setup a tox virtualenv
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
new file mode 100644
index 0000000..87e3ad1
--- /dev/null
+++ b/doc/source/stable_branch_support_policy.rst
@@ -0,0 +1,30 @@
+Stable Branch Support Policy
+============================
+
+Since the `Extended Maintenance policy`_ for stable branches was adopted
+OpenStack projects will keep stable branches around after a "stable" or
+"maintained" period for a phase of indeterminate length called "Extended
+Maintenance". Prior to this resolution Tempest supported all stable branches
+which were supported upstream. This policy does not scale under the new model
+as Tempest would be responsible for gating proposed changes against an ever
+increasing number of branches. Therefore due to resource constraints, Tempest
+will only provide support for branches in the "Maintained" phase from the
+documented `Support Phases`_. When a branch moves from the *Maintained* to the
+*Extended Maintenance* phase, Tempest will tag the removal of support for that
+branch as it has in the past when a branch goes end of life.
+
+The expectation for *Extended Maintenance* phase branches is that they will continue
+running Tempest during that phase of support. Since the REST APIs are stable
+interfaces across release boundaries, branches in these phases should run
+Tempest from master as long as possible. But, because we won't be actively
+testing branches in these phases, it's possible that we'll introduce changes to
+Tempest on master which will break support on *Extended Maintenance* phase
+branches. When this happens the expectation for those branches is to either
+switch to running Tempest from a tag with support for the branch, or blacklist
+a newly introduced test (if that is the cause of the issue). Tempest will not
+be creating stable branches to support *Extended Maintenance* phase branches, as
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
+project, to support that branch.
+
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Support Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index ddae6e2..e249bdd 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -1,21 +1,23 @@
+.. _test-removal:
+
Tempest Test Removal Procedure
==============================
-Historically tempest was the only way of doing functional testing and
-integration testing in OpenStack. This was mostly only an artifact of tempest
+Historically, Tempest was the only way of doing functional testing and
+integration testing in OpenStack. This was mostly only an artifact of Tempest
being the only proven pattern for doing this, not an artifact of a design
-decision. However, moving forward as functional testing is being spun up in
-each individual project we really only want tempest to be the integration test
-suite it was intended to be; testing the high level interactions between
-projects through REST API requests. In this model there are probably existing
-tests that aren't the best fit living in tempest. However, since tempest is
+decision. However, moving forward, as functional testing is being spun up in
+each individual project, we really only want Tempest to be the integration test
+suite it was intended to be: testing the high-level interactions between
+projects through REST API requests. In this model, there are probably existing
+tests that aren't the best fit living in Tempest. However, since Tempest is
largely still the only gating test suite in this space we can't carelessly rip
out everything from the tree. This document outlines the procedure which was
developed to ensure we minimize the risk for removing something of value from
-the tempest tree.
+the Tempest tree.
-This procedure might seem overly conservative and slow paced, but this is by
-design to try and ensure we don't remove something that is actually providing
+This procedure might seem overly conservative and slow-paced, but this is by
+design to try to ensure we don't remove something that is actually providing
value. Having potential duplication between testing is not a big deal
especially compared to the alternative of removing something which is actually
providing value and is actively catching bugs, or blocking incorrect patches
@@ -27,24 +29,24 @@
3 prong rule for removal
^^^^^^^^^^^^^^^^^^^^^^^^
-In the proposal etherpad we'll be looking for answers to 3 questions
+In the proposal etherpad we'll be looking for answers to 3 questions:
#. The tests proposed for removal must have equiv. coverage in a different
project's test suite (whether this is another gating test project, or an in
tree functional test suite). For API tests preferably the other project will
- have a similar source of friction in place to prevent breaking api changes
- so that we don't regress and let breaking api changes slip through the
+ have a similar source of friction in place to prevent breaking API changes
+ so that we don't regress and let breaking API changes slip through the
gate.
#. The test proposed for removal has a failure rate < 0.50% in the gate over
the past release (the value and interval will likely be adjusted in the
future)
.. _`prong #3`:
-#. There must not be an external user/consumer of tempest
+#. There must not be an external user/consumer of Tempest
that depends on the test proposed for removal
The answers to 1 and 2 are easy to verify. For 1 just provide a link to the new
-test location. If you are linking to the tempest removal patch please also put
+test location. If you are linking to the Tempest removal patch please also put
a Depends-On in the commit message for the commit which moved the test into
another repo.
@@ -91,32 +93,35 @@
#. paste the output table with numbers and the mysql command you ran to
generate it into the etherpad.
-Eventually a cli interface will be created to make that a bit more friendly.
+Eventually, a CLI interface will be created to make that a bit more friendly.
Also a dashboard is in the works so we don't need to manually run the command.
The intent of the 2nd prong is to verify that moving the test into a project
-specific testing is preventing bugs (assuming the tempest tests were catching
-issues) from bubbling up a layer into tempest jobs. If we're seeing failure
+specific testing is preventing bugs (assuming the Tempest tests were catching
+issues) from bubbling up a layer into Tempest jobs. If we're seeing failure
rates above a certain threshold in the gate checks that means the functional
testing isn't really being effective in catching that bug (and therefore
-blocking it from landing) and having the testing run in tempest still has
+blocking it from landing) and having the testing run in Tempest still has
value.
-However for the 3rd prong verification is a bit more subjective. The original
+However, for the 3rd prong, verification is a bit more subjective. The original
intent of this prong was mostly for refstack/defcore and also for things that
running on the stable branches. We don't want to remove any tests if that
-would break our api consistency checking between releases, or something that
-defcore/refstack is depending on being in tempest. It's worth pointing out
-that if a test is used in defcore as part of interop testing then it will
-probably have continuing value being in tempest as part of the
+would break our API consistency checking between releases, or something that
+defcore/refstack is depending on being in Tempest. It's worth pointing out
+that if a test is used in `defcore`_ as part of `interop`_ testing then it will
+probably have continuing value being in Tempest as part of the
integration/integrated tests in general. This is one area where some overlap
-is expected between testing in projects and tempest, which is not a bad thing.
+is expected between testing in projects and Tempest, which is not a bad thing.
+
+.. _defcore: https://wiki.openstack.org/wiki/Governance/InteropWG
+.. _interop: https://www.openstack.org/brand/interop
Discussing the 3rd prong
""""""""""""""""""""""""
There are 2 approaches to addressing the 3rd prong. Either it can be raised
-during a qa meeting during the tempest discussion. Please put it on the agenda
+during the Tempest discussion at a QA meeting. Please put it on the agenda
well ahead of the scheduled meeting. Since the meeting time will be well known
ahead of time anyone who depends on the tests will have ample time beforehand
to outline any concerns on the before the meeting. To give ample time for
@@ -133,19 +138,19 @@
Exceptions to this procedure
----------------------------
-For the most part all tempest test removals have to go through this procedure
+For the most part, all Tempest test removals have to go through this procedure;
there are a couple of exceptions though:
-#. The class of testing has been decided to be outside the scope of tempest.
+#. The class of testing has been decided to be outside the scope of Tempest.
#. A revert for a patch which added a broken test, or testing which didn't
actually run in the gate (basically any revert for something which
shouldn't have been added)
#. Tests that would become out of scope as a consequence of an API change,
as described in `API Compatibility`_.
Such tests cannot live in Tempest because of the branchless nature of
- Tempest. Such test must still honor `prong #3`_.
+ Tempest. Such tests must still honor `prong #3`_.
-For the first exception type the only types of testing in tree which have been
+For the first exception type, the only types of testing in the tree which have been
declared out of scope at this point are:
* The CLI tests (which should be completely removed at this point)
@@ -154,14 +159,14 @@
* XML API Tests (which should be completely removed at this point)
* EC2 API/boto tests (which should be completely removed at this point)
+For tests that fit into this category, the only criterion for removal is that
+For tests that fit into this category, the only criteria for removal is that
there is equivalent testing elsewhere.
Tempest Scope
^^^^^^^^^^^^^
-Starting in the liberty cycle tempest has defined a set of projects which
-are defined as in scope for direct testing in tempest. As of today that list
+Starting in the Liberty cycle, Tempest has defined a set of projects which
+are considered in scope for direct testing in Tempest. As of today that list
is:
* Keystone
@@ -171,23 +176,23 @@
* Neutron
* Swift
-anything that lives in tempest which doesn't test one of these projects can be
+Anything that lives in Tempest which doesn't test one of these projects can be
removed assuming there is equivalent testing elsewhere. Preferably using the
`tempest plugin mechanism`_
-to maintain continuity after migrating the tests out of tempest.
+to maintain continuity after migrating the tests out of Tempest.
.. _tempest plugin mechanism: https://docs.openstack.org/tempest/latest/plugin.html
API Compatibility
"""""""""""""""""
-If an API introduces a non-discoverable, backward incompatible change, and
-such change is not backported to all versions supported by Tempest, tests for
+If an API introduces a non-discoverable, backward-incompatible change, and
+such a change is not backported to all versions supported by Tempest, tests for
that API cannot live in Tempest anymore.
This is because tests would not be able to know or control which API response
to expect, and thus would not be able to enforce a specific behavior.
-If a test exists in Tempest that would meet this criteria as consequence of a
-change, the test must be removed according to the procedure discussed into
+If a test exists in Tempest that would meet these criteria as a consequence of a
+change, the test must be removed according to the procedure discussed in
this document. The API change should not be merged until all conditions
required for test removal can be met.
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index fff2405..0a29b7b 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -4,7 +4,7 @@
##########################
This guide serves as a starting point for developers working on writing new
-Tempest tests. At a high level tests in Tempest are just tests that conform to
+Tempest tests. At a high level, tests in Tempest are just tests that conform to
the standard python `unit test`_ framework. But there are several aspects of
that are unique to Tempest and its role as an integration test suite running
against a real cloud.
diff --git a/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
new file mode 100644
index 0000000..03d0ae8
--- /dev/null
+++ b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add ``enable_service``, ``disable_service``, ``disable_log_reason``,
+ ``freeze_host`` and ``thaw_host`` API endpoints to volume v3
+ ``services_client``.
diff --git a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
index b54ee8b..19d47d1 100644
--- a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
+++ b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
@@ -1,11 +1,9 @@
---
-prelude: >
- When using OVS HW offload feature we need to create
- Neutron port with a certain capability. This is done
- by creating Neutron port with binding profile. To be
- able to test this we need profile capability support
- in Tempest as well.
features:
- A new config option 'port_profile' is added to the section
'network' to specify capabilities of the port.
- By default this is set to {}.
+ By default this is set to {}. When using the OVS HW offload
+ feature we need to create a Neutron port with a certain
+ capability. This is done by creating a Neutron port with a
+ binding profile. To be able to test this, we need profile
+ capability support in Tempest as well.
diff --git a/releasenotes/notes/omit_X-Subject-Token_from_log-1bf5fef88c80334b.yaml b/releasenotes/notes/omit_X-Subject-Token_from_log-1bf5fef88c80334b.yaml
new file mode 100644
index 0000000..51c8f79
--- /dev/null
+++ b/releasenotes/notes/omit_X-Subject-Token_from_log-1bf5fef88c80334b.yaml
@@ -0,0 +1,7 @@
+---
+security:
+ - |
+ The ``x-subject-token`` in a response header is omitted from the log,
+ but clients send the same token in a request header when calling the
+ Keystone API, and that request header was not omitted. In this release,
+ it is omitted from the log as well, for security reasons.
diff --git a/releasenotes/notes/remove-allow_tenant_isolation-option-03f0d998eb498d44.yaml b/releasenotes/notes/remove-allow_tenant_isolation-option-03f0d998eb498d44.yaml
new file mode 100644
index 0000000..4f4516b
--- /dev/null
+++ b/releasenotes/notes/remove-allow_tenant_isolation-option-03f0d998eb498d44.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Remove deprecated config option ``allow_tenant_isolation`` from
+ ``auth`` and ``compute`` groups. Use ``use_dynamic_credentials`` directly
+ instead of the removed option.
diff --git a/releasenotes/notes/tempest-lib-compute-update-service-6019d2dcfe4a1c5d.yaml b/releasenotes/notes/tempest-lib-compute-update-service-6019d2dcfe4a1c5d.yaml
new file mode 100644
index 0000000..d67cdb8
--- /dev/null
+++ b/releasenotes/notes/tempest-lib-compute-update-service-6019d2dcfe4a1c5d.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ The ``update_service`` API is added to the ``services_client`` compute
+ library. This API is introduced in microversion 2.53 and supersedes
+ the following APIs:
+
+ * ``PUT /os-services/disable`` (``disable_service``)
+ * ``PUT /os-services/disable-log-reason`` (``disable_log_reason``)
+ * ``PUT /os-services/enable`` (``enable_service``)
+ * ``PUT /os-services/force-down`` (``update_forced_down``)
diff --git a/tempest/api/compute/admin/test_flavors_microversions.py b/tempest/api/compute/admin/test_flavors_microversions.py
index 027af25..9f014e6 100644
--- a/tempest/api/compute/admin/test_flavors_microversions.py
+++ b/tempest/api/compute/admin/test_flavors_microversions.py
@@ -41,3 +41,11 @@
self.flavors_client.list_flavors(detail=True)['flavors']
# Checking list API response schema
self.flavors_client.list_flavors()['flavors']
+
+
+class FlavorsV261TestJSON(FlavorsV255TestJSON):
+ min_microversion = '2.61'
+ max_microversion = 'latest'
+
+ # NOTE(gmann): This class tests the flavors APIs
+ # response schema for the 2.61 microversion.
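+ # Microversion 2.61 exposes flavor extra_specs in the flavor API
+ # responses, which is the schema change validated by these tests.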
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index 201670a..993c8ec 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -13,12 +13,14 @@
# under the License.
from tempest.api.compute import base
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class ServicesAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Services API. List and Enable/Disable require admin privileges."""
+ max_microversion = '2.52'
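+ # NOTE: The 2.52 cap above is because the os-services API changed in
+ # microversion 2.53 (enable/disable/force-down were superseded by
+ # update_service); negative tests for the 2.53 API live in the
+ # subclass below.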
@classmethod
def setup_clients(cls):
@@ -35,7 +37,8 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d0884a69-f693-4e79-a9af-232d15643bf7')
def test_get_service_by_invalid_params(self):
- # return all services if send the request with invalid parameter
+ # Expect all services to be returned when the request contains invalid
+ # parameters.
services = self.client.list_services()['services']
services_xxx = (self.client.list_services(xxx='nova-compute')
['services'])
@@ -58,3 +61,45 @@
services = self.client.list_services(host='xxx',
binary=binary_name)['services']
self.assertEmpty(services)
+
+
+class ServicesAdminNegativeV253TestJSON(ServicesAdminNegativeTestJSON):
+ min_microversion = '2.53'
+ max_microversion = 'latest'
+
+ # NOTE(felipemonteiro): This class tests the services APIs response schema
+ # for the 2.53 microversion. Schema testing is done for `list_services`
+ # tests.
+
+ @classmethod
+ def resource_setup(cls):
+ super(ServicesAdminNegativeV253TestJSON, cls).resource_setup()
+ # Nova returns 400 if `binary` is not nova-compute.
+ cls.binary = 'nova-compute'
+ cls.fake_service_id = data_utils.rand_uuid()
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('508671aa-c929-4479-bd10-8680d40dd0a6')
+ def test_enable_service_with_invalid_service_id(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.client.update_service,
+ service_id=self.fake_service_id,
+ status='enabled')
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('a9eeeade-42b3-419f-87aa-c9342aa068cf')
+ def test_disable_service_with_invalid_service_id(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.client.update_service,
+ service_id=self.fake_service_id,
+ status='disabled')
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('f46a9d91-1e85-4b96-8e7a-db7706fa2e9a')
+ def test_disable_log_reason_with_invalid_service_id(self):
+ # disabled_reason requires that status='disabled' be provided.
+ self.assertRaises(lib_exc.NotFound,
+ self.client.update_service,
+ service_id=self.fake_service_id,
+ status='disabled',
+ disabled_reason='maintenance')
diff --git a/tempest/api/compute/keypairs/test_keypairs_negative.py b/tempest/api/compute/keypairs/test_keypairs_negative.py
index f9050a8..81635ca 100644
--- a/tempest/api/compute/keypairs/test_keypairs_negative.py
+++ b/tempest/api/compute/keypairs/test_keypairs_negative.py
@@ -84,6 +84,6 @@
@decorators.idempotent_id('45fbe5e0-acb5-49aa-837a-ff8d0719db91')
def test_create_keypair_invalid_name(self):
# Keypairs with name being an invalid name should not be created
- k_name = 'key_/.\@:'
+ k_name = r'key_/.\@:'
self.assertRaises(lib_exc.BadRequest, self.create_keypair,
k_name)
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index ff8ed61..5d9bf48 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -320,7 +320,9 @@
try:
self.assertEmpty(md_dict['devices'])
return True
- except Exception:
+ except AssertionError:
+ LOG.debug("Related bug 1775947. Devices dict is not empty: %s",
+ md_dict['devices'])
return False
@decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 14aecfd..3dffd01 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -227,7 +227,7 @@
@decorators.idempotent_id('24a89b0c-0d55-4a28-847f-45075f19b27b')
def test_list_servers_filtered_by_name_regex(self):
# list of regex that should match s1, s2 and s3
- regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
+ regexes = [r'^.*\-instance\-[0-9]+$', r'^.*\-instance\-.*$']
for regex in regexes:
params = {'name': regex}
body = self.client.list_servers(**params)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 9fc5af0..350e8ba 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -369,6 +369,42 @@
server = self.client.show_server(self.server_id)['server']
self.assertEqual(self.flavor_ref, server['flavor']['id'])
+ @decorators.idempotent_id('fbbf075f-a812-4022-bc5c-ccb8047eef12')
+ @decorators.related_bug('1737599')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @utils.services('volume')
+ def test_resize_server_revert_with_volume_attached(self):
+ # Tests attaching a volume to a server instance and then resizing
+ # the instance. Once the instance is resized, revert the resize which
+ # should move the instance and volume attachment back to the original
+ # compute host.
+
+ # Create a blank volume and attach it to the server created in setUp.
+ volume = self.create_volume()
+ server = self.client.show_server(self.server_id)['server']
+ self.attach_volume(server, volume)
+ # Now resize the server with the blank volume attached.
+ self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ # Explicitly delete the server to get a new one for later
+ # tests. Avoids resize down race issues.
+ self.addCleanup(self.delete_server, self.server_id)
+ waiters.wait_for_server_status(
+ self.client, self.server_id, 'VERIFY_RESIZE')
+ # Now revert the resize which should move the instance and its volume
+ # attachment back to the original source compute host.
+ self.client.revert_resize_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ # Make sure everything still looks OK.
+ server = self.client.show_server(self.server_id)['server']
+ # The flavor id is not returned in the server response after
+ # microversion 2.46 so handle that gracefully.
+ if server['flavor'].get('id'):
+ self.assertEqual(self.flavor_ref, server['flavor']['id'])
+ attached_volumes = server['os-extended-volumes:volumes_attached']
+ self.assertEqual(1, len(attached_volumes))
+ self.assertEqual(volume['id'], attached_volumes[0]['id'])
+
@decorators.idempotent_id('b963d4f1-94b3-4c40-9e97-7b583f46e470')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 5fb1711..f810ec5 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -28,6 +28,7 @@
# TODO(mriedem): Remove this test class once the nova queens branch goes into
# extended maintenance mode.
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+ max_microversion = '2.43'
depends_on_nova_network = True
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index ec4d7a8..f6e8bc9 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -23,6 +23,7 @@
# TODO(mriedem): Remove this test class once the nova queens branch goes into
# extended maintenance mode.
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+ max_microversion = '2.43'
depends_on_nova_network = True
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 97a1f36..72b6be4 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -121,11 +121,7 @@
# Create a domain with a user and a group in it
domain = self.setup_test_domain()
user = self.create_test_user(domain_id=domain['id'])
- group = self.groups_client.create_group(
- name=data_utils.rand_name('group'),
- domain_id=domain['id'])['group']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.groups_client.delete_group, group['id'])
+ group = self.setup_test_group(domain_id=domain['id'])
# Delete the domain
self.delete_domain(domain['id'])
# Check the domain, its users and groups are gone
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 874aaa4..2cd8906 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -20,6 +20,10 @@
class EndPointsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index d54e222..4c3eb1c 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -1,4 +1,3 @@
-
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
@@ -21,6 +20,10 @@
class EndpointsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 507810b..37ce266 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -30,50 +30,46 @@
@decorators.idempotent_id('2e80343b-6c81-4ac3-88c7-452f3e9d5129')
def test_group_create_update_get(self):
+ # Verify group creation works.
name = data_utils.rand_name('Group')
description = data_utils.rand_name('Description')
- group = self.groups_client.create_group(
- name=name, domain_id=self.domain['id'],
- description=description)['group']
- self.addCleanup(self.groups_client.delete_group, group['id'])
+ group = self.setup_test_group(name=name, domain_id=self.domain['id'],
+ description=description)
self.assertEqual(group['name'], name)
self.assertEqual(group['description'], description)
+ self.assertEqual(self.domain['id'], group['domain_id'])
- new_name = data_utils.rand_name('UpdateGroup')
- new_desc = data_utils.rand_name('UpdateDescription')
+ # Verify updating name and description works.
+ first_name_update = data_utils.rand_name('UpdateGroup')
+ first_desc_update = data_utils.rand_name('UpdateDescription')
updated_group = self.groups_client.update_group(
- group['id'], name=new_name, description=new_desc)['group']
- self.assertEqual(updated_group['name'], new_name)
- self.assertEqual(updated_group['description'], new_desc)
+ group['id'], name=first_name_update,
+ description=first_desc_update)['group']
+ self.assertEqual(updated_group['name'], first_name_update)
+ self.assertEqual(updated_group['description'], first_desc_update)
+ # Verify that the updated values are reflected after performing show.
new_group = self.groups_client.show_group(group['id'])['group']
self.assertEqual(group['id'], new_group['id'])
- self.assertEqual(new_name, new_group['name'])
- self.assertEqual(new_desc, new_group['description'])
+ self.assertEqual(first_name_update, new_group['name'])
+ self.assertEqual(first_desc_update, new_group['description'])
- @decorators.idempotent_id('b66eb441-b08a-4a6d-81ab-fef71baeb26c')
- def test_group_update_with_few_fields(self):
- name = data_utils.rand_name('Group')
- old_description = data_utils.rand_name('Description')
- group = self.groups_client.create_group(
- name=name, domain_id=self.domain['id'],
- description=old_description)['group']
- self.addCleanup(self.groups_client.delete_group, group['id'])
-
- new_name = data_utils.rand_name('UpdateGroup')
+ # Verify that updating a single field for a group (name) leaves the
+ # other fields (description, domain_id) unchanged.
+ second_name_update = data_utils.rand_name(
+ self.__class__.__name__ + 'UpdateGroup')
updated_group = self.groups_client.update_group(
- group['id'], name=new_name)['group']
- self.assertEqual(new_name, updated_group['name'])
- # Verify that 'description' is not being updated or deleted.
- self.assertEqual(old_description, updated_group['description'])
+ group['id'], name=second_name_update)['group']
+ self.assertEqual(second_name_update, updated_group['name'])
+ # Verify that 'description' and 'domain_id' were not updated or
+ # deleted.
+ self.assertEqual(first_desc_update, updated_group['description'])
+ self.assertEqual(self.domain['id'], updated_group['domain_id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
def test_group_users_add_list_delete(self):
- name = data_utils.rand_name('Group')
- group = self.groups_client.create_group(
- name=name, domain_id=self.domain['id'])['group']
- self.addCleanup(self.groups_client.delete_group, group['id'])
+ group = self.setup_test_group(domain_id=self.domain['id'])
# add user into group
users = []
for _ in range(3):
@@ -100,11 +96,8 @@
# create two groups, and add user into them
groups = []
for _ in range(2):
- name = data_utils.rand_name('Group')
- group = self.groups_client.create_group(
- name=name, domain_id=self.domain['id'])['group']
+ group = self.setup_test_group(domain_id=self.domain['id'])
groups.append(group)
- self.addCleanup(self.groups_client.delete_group, group['id'])
self.groups_client.add_group_user(group['id'], user['id'])
# list groups which user belongs to
user_groups = self.users_client.list_user_groups(user['id'])['groups']
@@ -118,12 +111,7 @@
group_ids = list()
fetched_ids = list()
for _ in range(3):
- name = data_utils.rand_name('Group')
- description = data_utils.rand_name('Description')
- group = self.groups_client.create_group(
- name=name, domain_id=self.domain['id'],
- description=description)['group']
- self.addCleanup(self.groups_client.delete_group, group['id'])
+ group = self.setup_test_group(domain_id=self.domain['id'])
group_ids.append(group['id'])
# List and Verify Groups
# When domain specific drivers are enabled the operations
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 0845407..8ae43d6 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -19,7 +19,6 @@
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -28,30 +27,6 @@
credentials = ['primary', 'admin', 'alt']
- @decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
- def test_tokens(self):
- # Valid user's token is authenticated
- # Create a User
- u_name = data_utils.rand_name('user')
- u_desc = '%s-description' % u_name
- u_password = data_utils.rand_password()
- user = self.create_test_user(
- name=u_name, description=u_desc, password=u_password)
- # Perform Authentication
- resp = self.token.auth(user_id=user['id'],
- password=u_password).response
- subject_token = resp['x-subject-token']
- self.client.check_token_existence(subject_token)
- # Perform GET Token
- token_details = self.client.show_token(subject_token)['token']
- self.assertEqual(resp['x-subject-token'], subject_token)
- self.assertEqual(token_details['user']['id'], user['id'])
- self.assertEqual(token_details['user']['name'], u_name)
- # Perform Delete Token
- self.client.delete_token(subject_token)
- self.assertRaises(lib_exc.NotFound, self.client.check_token_existence,
- subject_token)
-
@decorators.idempotent_id('565fa210-1da1-4563-999b-f7b5b67cf112')
def test_rescope_token(self):
"""Rescope a token.
@@ -201,10 +176,7 @@
role_id = self.setup_test_role()['id']
# Create a group.
- group_name = data_utils.rand_name('Group')
- group_id = self.groups_client.create_group(
- name=group_name, domain_id=domain_id)['group']['id']
- self.addCleanup(self.groups_client.delete_group, group_id)
+ group_id = self.setup_test_group(domain_id=domain_id)['id']
# Add the alt user to the group.
self.groups_client.add_group_user(group_id, alt_user_id)
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 68f2c07..282343c 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -292,6 +292,20 @@
self.delete_domain, domain['id'])
return domain
+ def setup_test_group(self, **kwargs):
+ """Set up a test group."""
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '_test_group')
+ if 'description' not in kwargs:
+ kwargs['description'] = data_utils.rand_name(
+ self.__class__.__name__ + '_test_description')
+ group = self.groups_client.create_group(**kwargs)['group']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.groups_client.delete_group, group['id'])
+ return group
+
class BaseApplicationCredentialsV3Test(BaseIdentityV3Test):
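A minimal sketch of how a test can consume the new ``setup_test_group``
helper. The test class, test name, and the ``self.domain`` fixture are
illustrative assumptions modeled on the admin groups tests, not part of this
patch::

    class GroupUsageExample(base.BaseIdentityV3AdminTest):

        def test_show_created_group(self):
            # The helper fills in a random name/description when none are
            # given and registers cleanup with call_and_ignore_notfound_exc,
            # so a group the test has already deleted does not break teardown.
            group = self.setup_test_group(domain_id=self.domain['id'])
            fetched = self.groups_client.show_group(group['id'])['group']
            self.assertEqual(group['id'], fetched['id'])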
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 9c77fff..158dfb3 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -84,7 +84,7 @@
new_pass = data_utils.rand_password()
user_id = self.creds.user_id
- # to change password back. important for allow_tenant_isolation = false
+ # to change password back. important for use_dynamic_credentials=false
self.addCleanup(self._restore_password, user_id, old_pass, new_pass)
# user updates own password
diff --git a/tempest/api/identity/v3/test_tokens.py b/tempest/api/identity/v3/test_tokens.py
index 4c72d82..f13aa10 100644
--- a/tempest/api/identity/v3/test_tokens.py
+++ b/tempest/api/identity/v3/test_tokens.py
@@ -91,3 +91,28 @@
self.assertIsNotNone(subject_name, 'Expected user name in token.')
self.assertEqual(resp['methods'][0], 'password')
+
+ @decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
+ def test_token_auth_creation_existence_deletion(self):
+ # Tests basic token auth functionality in a way that is compatible with
+ # pre-provisioned credentials. The default user is used for token
+ # authentication.
+
+ # Valid user's token is authenticated
+ user = self.os_primary.credentials
+ # Perform Authentication
+ resp = self.non_admin_token.auth(
+ user_id=user.user_id, password=user.password).response
+ subject_token = resp['x-subject-token']
+ self.non_admin_client.check_token_existence(subject_token)
+ # Perform GET Token
+ token_details = self.non_admin_client.show_token(
+ subject_token)['token']
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user.user_id)
+ self.assertEqual(token_details['user']['name'], user.username)
+ # Perform Delete Token
+ self.non_admin_client.delete_token(subject_token)
+ self.assertRaises(lib_exc.NotFound,
+ self.non_admin_client.check_token_existence,
+ subject_token)
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 1f099df..6d6baca 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -82,7 +82,7 @@
old_token = self.non_admin_client.token
new_pass = data_utils.rand_password()
- # to change password back. important for allow_tenant_isolation = false
+ # to change password back. important for use_dynamic_credentials=false
self.addCleanup(self._restore_password, old_pass, new_pass)
# user updates own password
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 6849653..e79f8c3 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -59,7 +59,7 @@
# Try to create a third network while the quota is two
with self.assertRaisesRegex(
lib_exc.Conflict,
- "Quota exceeded for resources: \['network'\].*"):
+ r"Quota exceeded for resources: \['network'\].*"):
n3 = self.networks_client.create_network()
self.addCleanup(self.networks_client.delete_network,
n3['network']['id'])
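Several hunks in this change only add an ``r`` prefix to regular-expression
literals. A standalone illustration of why, using nothing but the standard
library::

    import re

    # Without the r prefix, escapes such as \[ and \d are invalid string
    # escape sequences and trigger DeprecationWarning on newer Python
    # releases; as a raw string the pattern text reaches re unchanged.
    pattern = r"Quota exceeded for resources: \['network'\].*"
    assert re.match(pattern, "Quota exceeded for resources: ['network'] ...")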
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index ef4a23a..b4bb88e 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -15,6 +15,7 @@
from tempest.api.network import base
from tempest.common import utils
+from tempest.common.utils import data_utils
from tempest.common.utils import net_utils
from tempest import config
from tempest.lib import decorators
@@ -158,11 +159,21 @@
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
- network2 = self.create_network()
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network2 = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network2['id'])
subnet2 = self.create_subnet(network2)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])
router2 = self.create_router(external_network_id=self.ext_net_id)
+ self.addCleanup(self.routers_client.delete_router, router2['id'])
self.create_router_interface(router2['id'], subnet2['id'])
+ self.addCleanup(self.routers_client.remove_router_interface,
+ router2['id'], subnet_id=subnet2['id'])
port_other_router = self.create_port(network2)
+ self.addCleanup(self.ports_client.delete_port,
+ port_other_router['id'])
# Associate floating IP to the other port on another router
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
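The cleanups added above rely on ``addCleanup`` running in last-in,
first-out order, so the router interface is detached before the router,
subnet, and network are deleted. A tiny, self-contained illustration of that
ordering (plain ``testtools``, names are placeholders)::

    import testtools


    class CleanupOrderExample(testtools.TestCase):

        def test_lifo_cleanup(self):
            events = []
            self.addCleanup(events.append, 'delete router')     # runs second
            self.addCleanup(events.append, 'remove interface')  # runs first
            # Cleanups execute after the test body in reverse registration
            # order: 'remove interface' happens before 'delete router'.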
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5168423..246a5c3 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -52,6 +52,21 @@
ports_list = body['ports']
self.assertFalse(port_id in [n['id'] for n in ports_list])
+ def _create_subnet(self, network, gateway='',
+ cidr=None, mask_bits=None, **kwargs):
+ subnet = self.create_subnet(network, gateway, cidr, mask_bits,
+ **kwargs)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ return subnet
+
+ def _create_network(self, network_name=None, **kwargs):
+ network_name = network_name or data_utils.rand_name(
+ self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name, **kwargs)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ return network
+
@decorators.attr(type='smoke')
@decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
@@ -73,7 +88,7 @@
@decorators.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
def test_create_bulk_port(self):
network1 = self.network
- network2 = self.create_network()
+ network2 = self._create_network()
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
body = self.ports_client.create_bulk_ports(ports=port_list)
@@ -90,7 +105,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
- network = self.create_network()
+ network = self._create_network()
net_id = network['id']
address = self.cidr
address.prefixlen = self.mask_bits
@@ -100,10 +115,9 @@
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
'end': str(address[-2])}]}
- subnet = self.create_subnet(network, cidr=address,
- mask_bits=address.prefixlen,
- **allocation_pools)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ self._create_subnet(network, cidr=address,
+ mask_bits=address.prefixlen,
+ **allocation_pools)
body = self.ports_client.create_port(network_id=net_id)
self.addCleanup(self.ports_client.delete_port, body['port']['id'])
port = body['port']
@@ -153,9 +167,8 @@
@decorators.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
def test_port_list_filter_by_ip(self):
# Create network and subnet
- network = self.create_network()
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
# Create two ports
port_1 = self.ports_client.create_port(network_id=network['id'])
self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
@@ -187,10 +200,8 @@
'ip-substring-filtering extension not enabled.')
def test_port_list_filter_by_ip_substr(self):
# Create network and subnet
- network = self.create_network()
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
-
+ network = self._create_network()
+ subnet = self._create_subnet(network)
# Get two IP addresses
ip_address_1 = None
ip_address_2 = None
@@ -261,10 +272,8 @@
@decorators.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
# Create a router
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
router = self.create_router()
self.addCleanup(self.routers_client.delete_router, router['id'])
port = self.ports_client.create_port(network_id=network['id'])
@@ -294,12 +303,9 @@
@decorators.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
# Create a network with two subnets
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet_1 = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_1['id'])
- subnet_2 = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_2['id'])
+ network = self._create_network()
+ subnet_1 = self._create_subnet(network)
+ subnet_2 = self._create_subnet(network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
@@ -323,8 +329,7 @@
self.assertEqual(2, len(port['fixed_ips']))
def _update_port_with_security_groups(self, security_groups_names):
- subnet_1 = self.create_subnet(self.network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_1['id'])
+ subnet_1 = self._create_subnet(self.network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
security_groups_list = list()
@@ -413,10 +418,8 @@
utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_create_port_with_no_securitygroups(self):
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
port = self.create_port(network, security_groups=[])
self.addCleanup(self.ports_client.delete_port, port['id'])
self.assertIsNotNone(port['security_groups'])
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index abbb779..3ff12e4 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -39,6 +39,11 @@
self.addCleanup(self._cleanup_router, router)
return router
+ def _create_subnet(self, network, gateway='', cidr=None):
+ subnet = self.create_subnet(network, gateway, cidr)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ return subnet
+
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
interface = self.routers_client.add_router_interface(
router_id, subnet_id=subnet_id)
@@ -65,12 +70,12 @@
'The public_network_id option must be specified.')
def test_create_show_list_update_delete_router(self):
# Create a router
- name = data_utils.rand_name(self.__class__.__name__ + '-router')
+ router_name = data_utils.rand_name(self.__class__.__name__ + '-router')
router = self._create_router(
- name=name,
+ name=router_name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
- self.assertEqual(router['name'], name)
+ self.assertEqual(router['name'], router_name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
router['external_gateway_info']['network_id'],
@@ -97,8 +102,12 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
def test_add_remove_router_interface_with_subnet_id(self):
- network = self.create_network()
- subnet = self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ subnet = self._create_subnet(network)
router = self._create_router()
# Add router interface with subnet id
interface = self.routers_client.add_router_interface(
@@ -116,8 +125,12 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
def test_add_remove_router_interface_with_port_id(self):
- network = self.create_network()
- self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ self._create_subnet(network)
router = self._create_router()
port_body = self.ports_client.create_port(
network_id=network['id'])
@@ -183,13 +196,18 @@
# Update router extra route, second ip of the range is
# used as next hop
for i in range(routes_num):
- network = self.create_network()
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
subnet = self.create_subnet(network, cidr=next_cidr)
next_cidr = next_cidr.next()
# Add router interface with subnet id
self.create_router_interface(router['id'], subnet['id'])
-
+ self.addCleanup(self._remove_router_interface_with_subnet_id,
+ router['id'], subnet['id'])
cidr = netaddr.IPNetwork(subnet['cidr'])
next_hop = str(cidr[2])
destination = str(subnet['cidr'])
@@ -242,13 +260,18 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
def test_add_multiple_router_interfaces(self):
- network01 = self.create_network(
- network_name=data_utils.rand_name('router-network01-'))
- network02 = self.create_network(
- network_name=data_utils.rand_name('router-network02-'))
- subnet01 = self.create_subnet(network01)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network01 = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network01['id'])
+ network02 = self.networks_client.create_network(
+ name=data_utils.rand_name(self.__class__.__name__))['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network02['id'])
+ subnet01 = self._create_subnet(network01)
sub02_cidr = self.cidr.next()
- subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
+ subnet02 = self._create_subnet(network02, cidr=sub02_cidr)
router = self._create_router()
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
@@ -261,8 +284,12 @@
@decorators.idempotent_id('96522edf-b4b5-45d9-8443-fa11c26e6eff')
def test_router_interface_port_update_with_fixed_ip(self):
- network = self.create_network()
- subnet = self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ subnet = self._create_subnet(network)
router = self._create_router()
fixed_ip = [{'subnet_id': subnet['id']}]
interface = self._add_router_interface_with_subnet_id(router['id'],
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 45f4caa..731a055 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -157,6 +157,57 @@
waiters.wait_for_volume_resource_status(
self.groups_client, grp2['id'], 'available')
+ @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
+ @decorators.related_bug('1739031')
+ def test_delete_group_snapshots_following_updated_volumes(self):
+ volume_type = self.create_volume_type()
+
+ group_type = self.create_group_type()
+
+ # Create a volume group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Note: When dealing with consistency groups all volumes must
+ # reside on the same backend. Adding volumes to the same consistency
+ # group from multiple backends isn't supported. In order to ensure all
+ # volumes share the same backend, all volumes must share the same
+ # volume type and group id.
+ volume_list = []
+ for _ in range(2):
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+ volume_list.append(volume['id'])
+
+ for vol in volume_list:
+ self.groups_client.update_group(grp['id'],
+ remove_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ self.groups_client.update_group(grp['id'],
+ add_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ # Verify the created volumes are associated with consistency group
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ grp_vols = [v for v in vols if v['group_id'] == grp['id']]
+ self.assertEqual(2, len(grp_vols))
+
+ # Create a snapshot group
+ group_snapshot = self._create_group_snapshot(group_id=grp['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+
+ for snap in snapshots:
+ if snap['volume_id'] in volume_list:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Delete a snapshot group
+ self._delete_group_snapshot(group_snapshot)
+
class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
_api_version = 3
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 20c3538..9907497 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -62,8 +62,16 @@
return message_id
@decorators.idempotent_id('50f29e6e-f363-42e1-8ad1-f67ae7fd4d5a')
- def test_list_messages(self):
- self._create_user_message()
+ def test_list_show_messages(self):
+ message_id = self._create_user_message()
+ self.addCleanup(self.messages_client.delete_message, message_id)
+
+ # show message
+ message = self.messages_client.show_message(message_id)['message']
+ for key in MESSAGE_KEYS:
+ self.assertIn(key, message.keys(), 'Missing expected key %s' % key)
+
+ # list messages
messages = self.messages_client.list_messages()['messages']
self.assertIsInstance(messages, list)
for message in messages:
@@ -71,16 +79,6 @@
self.assertIn(key, message.keys(),
'Missing expected key %s' % key)
- @decorators.idempotent_id('55a4a61e-c7b2-4ba0-a05d-b914bdef3070')
- def test_show_message(self):
- message_id = self._create_user_message()
- self.addCleanup(self.messages_client.delete_message, message_id)
-
- message = self.messages_client.show_message(message_id)['message']
-
- for key in MESSAGE_KEYS:
- self.assertIn(key, message.keys(), 'Missing expected key %s' % key)
-
@decorators.idempotent_id('c6eb6901-cdcc-490f-b735-4fe251842aed')
def test_delete_message(self):
message_id = self._create_user_message()
diff --git a/tempest/api/volume/admin/test_volume_retype_with_migration.py b/tempest/api/volume/admin/test_volume_retype.py
similarity index 67%
rename from tempest/api/volume/admin/test_volume_retype_with_migration.py
rename to tempest/api/volume/admin/test_volume_retype.py
index 025c1be..1c56eb2 100644
--- a/tempest/api/volume/admin/test_volume_retype_with_migration.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
from oslo_log import log as logging
@@ -23,31 +24,7 @@
LOG = logging.getLogger(__name__)
-class VolumeRetypeWithMigrationTest(base.BaseVolumeAdminTest):
-
- @classmethod
- def skip_checks(cls):
- super(VolumeRetypeWithMigrationTest, cls).skip_checks()
-
- if not CONF.volume_feature_enabled.multi_backend:
- raise cls.skipException("Cinder multi-backend feature disabled.")
-
- if len(set(CONF.volume.backend_names)) < 2:
- raise cls.skipException("Requires at least two different "
- "backend names")
-
- @classmethod
- def resource_setup(cls):
- super(VolumeRetypeWithMigrationTest, cls).resource_setup()
- # read backend name from a list.
- backend_src = CONF.volume.backend_names[0]
- backend_dst = CONF.volume.backend_names[1]
-
- extra_specs_src = {"volume_backend_name": backend_src}
- extra_specs_dst = {"volume_backend_name": backend_dst}
-
- cls.src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
- cls.dst_vol_type = cls.create_volume_type(extra_specs=extra_specs_dst)
+class VolumeRetypeTest(base.BaseVolumeAdminTest):
def _wait_for_internal_volume_cleanup(self, vol):
# When retyping a volume, Cinder creates an internal volume in the
@@ -70,43 +47,11 @@
fetched_vol['id'])
break
- def _retype_volume(self, volume):
- keys_with_no_change = ('id', 'size', 'description', 'name', 'user_id',
- 'os-vol-tenant-attr:tenant_id')
- keys_with_change = ('volume_type', 'os-vol-host-attr:host')
+ @abc.abstractmethod
+ def _verify_migration(self, source_vol, dest_vol):
+ pass
- volume_source = self.admin_volume_client.show_volume(
- volume['id'])['volume']
-
- self.volumes_client.retype_volume(
- volume['id'],
- new_type=self.dst_vol_type['name'],
- migration_policy='on-demand')
- self.addCleanup(self._wait_for_internal_volume_cleanup, volume)
- waiters.wait_for_volume_retype(self.volumes_client, volume['id'],
- self.dst_vol_type['name'])
-
- volume_dest = self.admin_volume_client.show_volume(
- volume['id'])['volume']
-
- # Check the volume information after the migration.
- self.assertEqual('success',
- volume_dest['os-vol-mig-status-attr:migstat'])
- self.assertEqual('success', volume_dest['migration_status'])
-
- for key in keys_with_no_change:
- self.assertEqual(volume_source[key], volume_dest[key])
-
- for key in keys_with_change:
- self.assertNotEqual(volume_source[key], volume_dest[key])
-
- @decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
- def test_available_volume_retype_with_migration(self):
- src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
- self._retype_volume(src_vol)
-
- @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
- def test_volume_from_snapshot_retype_with_migration(self):
+ def _create_volume_from_snapshot(self):
# Create a volume in the first backend
src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
@@ -121,5 +66,115 @@
self.snapshots_client.delete_snapshot(snapshot['id'])
self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+ return src_vol
+
+ def _retype_volume(self, volume, migration_policy):
+
+ volume_source = self.admin_volume_client.show_volume(
+ volume['id'])['volume']
+
+ self.volumes_client.retype_volume(
+ volume['id'],
+ new_type=self.dst_vol_type['name'],
+ migration_policy=migration_policy)
+ self.addCleanup(self._wait_for_internal_volume_cleanup, volume)
+ waiters.wait_for_volume_retype(self.volumes_client, volume['id'],
+ self.dst_vol_type['name'])
+
+ volume_dest = self.admin_volume_client.show_volume(
+ volume['id'])['volume']
+
+ self._verify_migration(volume_source, volume_dest)
+
+
+class VolumeRetypeWithMigrationTest(VolumeRetypeTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(VolumeRetypeTest, cls).skip_checks()
+
+ if not CONF.volume_feature_enabled.multi_backend:
+ raise cls.skipException("Cinder multi-backend feature disabled.")
+
+ if len(set(CONF.volume.backend_names)) < 2:
+ raise cls.skipException("Requires at least two different "
+ "backend names")
+
+ @classmethod
+ def resource_setup(cls):
+ super(VolumeRetypeWithMigrationTest, cls).resource_setup()
+ # read backend name from a list.
+ backend_src = CONF.volume.backend_names[0]
+ backend_dst = CONF.volume.backend_names[1]
+
+ extra_specs_src = {"volume_backend_name": backend_src}
+ extra_specs_dst = {"volume_backend_name": backend_dst}
+
+ cls.src_vol_type = cls.create_volume_type(extra_specs=extra_specs_src)
+ cls.dst_vol_type = cls.create_volume_type(extra_specs=extra_specs_dst)
+
+ def _verify_migration(self, volume_source, volume_dest):
+
+ keys_with_no_change = ('id', 'size', 'description', 'name',
+ 'user_id', 'os-vol-tenant-attr:tenant_id')
+ keys_with_change = ('volume_type', 'os-vol-host-attr:host')
+
+ # Check the volume information after the migration.
+ self.assertEqual('success',
+ volume_dest['os-vol-mig-status-attr:migstat'])
+ self.assertEqual('success', volume_dest['migration_status'])
+
+ for key in keys_with_no_change:
+ self.assertEqual(volume_source[key], volume_dest[key])
+
+ for key in keys_with_change:
+ self.assertNotEqual(volume_source[key], volume_dest[key])
+
+ self.assertEqual(volume_dest['volume_type'], self.dst_vol_type['name'])
+
+ @decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
+ def test_available_volume_retype_with_migration(self):
+ src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
+ self._retype_volume(src_vol, migration_policy='on-demand')
+
+ @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
+ def test_volume_from_snapshot_retype_with_migration(self):
+ src_vol = self._create_volume_from_snapshot()
+
# Migrate the volume from snapshot to the second backend
- self._retype_volume(src_vol)
+ self._retype_volume(src_vol, migration_policy='on-demand')
+
+
+class VolumeRetypeWithoutMigrationTest(VolumeRetypeTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(VolumeRetypeWithoutMigrationTest, cls).resource_setup()
+ cls.src_vol_type = cls.create_volume_type('volume-type-1')
+ cls.dst_vol_type = cls.create_volume_type('volume-type-2')
+
+ def _verify_migration(self, volume_source, volume_dest):
+
+ keys_with_no_change = ('id', 'size', 'description', 'name',
+ 'user_id', 'os-vol-tenant-attr:tenant_id',
+ 'os-vol-host-attr:host')
+ keys_with_change = ('volume_type',)
+
+ # Check the volume information after the retype
+ self.assertIsNone(volume_dest['os-vol-mig-status-attr:migstat'])
+ self.assertIsNone(volume_dest['migration_status'])
+
+ for key in keys_with_no_change:
+ self.assertEqual(volume_source[key], volume_dest[key])
+
+ for key in keys_with_change:
+ self.assertNotEqual(volume_source[key], volume_dest[key])
+
+ self.assertEqual(volume_dest['volume_type'], self.dst_vol_type['name'])
+
+ @decorators.idempotent_id('b90412ee-465d-46e9-b249-ec84a47d5f25')
+ def test_available_volume_retype(self):
+ src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
+
+ # Retype the volume without migration
+ self._retype_volume(src_vol, migration_policy='never')
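The retype tests above now follow a template-method layout: the shared
``_retype_volume`` driver lives in the base class and delegates the result
checks to an abstract ``_verify_migration`` hook. A stripped-down sketch of
the same pattern, with illustrative names only::

    import abc


    class RetypeBase(metaclass=abc.ABCMeta):

        @abc.abstractmethod
        def _verify_migration(self, source, dest):
            """Each subclass decides what a successful retype looks like."""

        def retype(self, source, dest):
            # Common driver logic runs once here ...
            self._verify_migration(source, dest)


    class RetypeWithMigration(RetypeBase):

        def _verify_migration(self, source, dest):
            assert source['host'] != dest['host']   # volume moved backends


    class RetypeWithoutMigration(RetypeBase):

        def _verify_migration(self, source, dest):
            assert source['host'] == dest['host']   # volume stayed in place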
diff --git a/tempest/api/volume/admin/test_volume_services_negative.py b/tempest/api/volume/admin/test_volume_services_negative.py
new file mode 100644
index 0000000..6f3dbc6
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services_negative.py
@@ -0,0 +1,65 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(VolumeServicesNegativeTest, cls).resource_setup()
+ cls.services = cls.admin_volume_services_client.list_services()[
+ 'services']
+ cls.host = cls.services[0]['host']
+ cls.binary = cls.services[0]['binary']
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
+ def test_enable_service_with_invalid_host(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.enable_service,
+ host='invalid_host', binary=self.binary)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
+ def test_disable_service_with_invalid_binary(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.disable_service,
+ host=self.host, binary='invalid_binary')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
+ def test_disable_log_reason_with_no_reason(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.disable_log_reason,
+ host=self.host, binary=self.binary,
+ disabled_reason=None)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
+ def test_freeze_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.freeze_host,
+ host='invalid_host')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
+ def test_thaw_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.thaw_host,
+ host='invalid_host')
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 35eea11..45060d0 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -60,6 +60,8 @@
# Create backup
backup_name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Export Backup
@@ -105,16 +107,18 @@
self.addCleanup(self.volumes_client.delete_volume,
restore['volume_id'])
self.assertEqual(backup['id'], restore['backup_id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- restore['volume_id'],
- 'available')
+
+ # When a restore operation is performed, the backup goes into the
+ # 'restoring' state, so wait for backup['id'] to become 'available'
+ # again before checking the restored volume.
+ waiters.wait_for_volume_resource_status(
+ self.backups_client, backup['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, restore['volume_id'], 'available')
# Verify if restored volume is there in volume list
volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
- waiters.wait_for_volume_resource_status(self.admin_backups_client,
- import_backup['id'],
- 'available')
@decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
def test_volume_backup_reset_status(self):
@@ -124,6 +128,8 @@
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Reset backup status to error
self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 07cfad5..c178272 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -117,6 +117,8 @@
self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'],
name=backup_name, force=True)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
self.assertEqual(backup_name, backup['name'])
@decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
@@ -132,6 +134,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
@@ -160,6 +164,8 @@
# Create volume and backup
volume = self.create_volume()
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Update backup and assert response body for update_backup method
update_kwargs = {
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 52114bc..93638b8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -163,6 +164,8 @@
backup = self.create_backup(volume_id=self.volume_origin['id'],
snapshot_id=snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
backup_info = self.backups_client.show_backup(backup['id'])['backup']
self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 1c671ec..9be8ee2 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -311,5 +311,6 @@
resources.extend(generate_resources(cred_provider, opts.admin))
dump_accounts(resources, opts.identity_version, opts.accounts)
+
if __name__ == "__main__":
main()
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index 84c8631..d84f3a3 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-STESTR_CONF = """[DEFAULT]
+STESTR_CONF = r"""[DEFAULT]
test_path=%s
top_dir=%s
group_regex=([^\.]*\.)*
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index a4402fe..8dcf575 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -95,7 +95,7 @@
ip_re = re.compile(r'(^|[^0-9])[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]'
'{1,3}([^0-9]|$)')
url_re = re.compile(r'.*INFO.*Request \((?P<name>.*)\): (?P<code>[\d]{3}) '
- '(?P<verb>\w*) (?P<url>.*) .*')
+ r'(?P<verb>\w*) (?P<url>.*) .*')
port_re = re.compile(r'.*:(?P<port>\d+).*')
path_re = re.compile(r'http[s]?://[^/]*/(?P<path>.*)')
request_re = re.compile(r'.* Request - Headers: (?P<headers>.*)')
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 15af271..aa333b3 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -488,5 +488,6 @@
traceback.print_exc()
raise
+
if __name__ == "__main__":
main()
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index 75db155..c6e5dcb 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -210,6 +210,7 @@
except exceptions.InvalidConfiguration:
return False
+
# === Credentials
# Type of credentials available from configuration
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index ed11b21..c702d88 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -225,9 +225,9 @@
elif key in ('content-type', 'date', 'last-modified',
'x-copied-from-last-modified') and not value:
return InvalidFormat(key, value)
- elif key == 'x-timestamp' and not re.match("^\d+\.?\d*\Z", value):
+ elif key == 'x-timestamp' and not re.match(r"^\d+\.?\d*\Z", value):
return InvalidFormat(key, value)
- elif key == 'x-copied-from' and not re.match("\S+/\S+", value):
+ elif key == 'x-copied-from' and not re.match(r"\S+/\S+", value):
return InvalidFormat(key, value)
elif key == 'x-trans-id' and \
not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 225a713..167bf5b 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -40,6 +40,7 @@
self.__dict__[attr] = attr_obj
return attr_obj
+
data_utils = DataUtils()
diff --git a/tempest/config.py b/tempest/config.py
index 1fb5c8e..cc0ba34 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -61,11 +61,7 @@
"users. This option requires that OpenStack Identity "
"API admin credentials are known. If false, isolated "
"test cases and parallel execution, can still be "
- "achieved configuring a list of test accounts",
- deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
- group='auth'),
- cfg.DeprecatedOpt('allow_tenant_isolation',
- group='compute')]),
+ "achieved configuring a list of test accounts"),
cfg.ListOpt('tempest_roles',
help="Roles to assign to all users created by tempest",
default=[]),
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index b6e7f8c..a57a360 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -287,10 +287,10 @@
if pep8.noqa(physical_line):
return
- if not re.match('class .*Test.*\(.*Admin.*\):', logical_line):
+ if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
- if not re.match('.\/tempest\/api\/.*\/admin\/.*', filename):
+ if not re.match(r'.\/tempest\/api\/.*\/admin\/.*', filename):
msg = 'T115: All admin tests should exist under admin path.'
yield(0, msg)
diff --git a/tempest/lib/api_schema/response/compute/v2_1/flavors.py b/tempest/lib/api_schema/response/compute/v2_1/flavors.py
index af5e67f..43e80cc 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/flavors.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/flavors.py
@@ -82,10 +82,6 @@
}
}
-unset_flavor_extra_specs = {
- 'status_code': [200]
-}
-
create_update_get_flavor_details = {
'status_code': [200],
'response_body': {
diff --git a/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py b/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py
index a438d48..3aa1eda 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/flavors_extra_specs.py
@@ -20,7 +20,7 @@
'extra_specs': {
'type': 'object',
'patternProperties': {
- '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+ r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
},
@@ -29,12 +29,16 @@
}
}
+unset_flavor_extra_specs = {
+ 'status_code': [200]
+}
+
set_get_flavor_extra_specs_key = {
'status_code': [200],
'response_body': {
'type': 'object',
'patternProperties': {
- '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+ r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
}
diff --git a/tempest/lib/api_schema/response/compute/v2_11/services.py b/tempest/lib/api_schema/response/compute/v2_11/services.py
index 18b833b..9ece1f9 100644
--- a/tempest/lib/api_schema/response/compute/v2_11/services.py
+++ b/tempest/lib/api_schema/response/compute/v2_11/services.py
@@ -44,3 +44,10 @@
'required': ['service']
}
}
+
+# **** Schemas unchanged in microversion 2.11 since microversion 2.1 ****
+# Note(felipemonteiro): Below are the schemas that are unchanged in this
+# microversion. We need to keep them in this file so that the service client
+# can select the right schema via its self.schema_versions_info mapping.
+enable_disable_service = copy.deepcopy(services.enable_disable_service)
+disable_log_reason = copy.deepcopy(services.disable_log_reason)
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index 935be70..5d6d4c3 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -26,7 +26,7 @@
'extra_specs': {
'type': 'object',
'patternProperties': {
- '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+ r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
},
diff --git a/tempest/lib/api_schema/response/compute/v2_53/__init__.py b/tempest/lib/api_schema/response/compute/v2_53/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_53/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_53/services.py b/tempest/lib/api_schema/response/compute/v2_53/services.py
new file mode 100644
index 0000000..aa132a9
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_53/services.py
@@ -0,0 +1,70 @@
+# Copyright 2018 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_11 import services \
+ as servicesv211
+
+# ***************** Schemas changed in microversion 2.53 *****************
+
+# NOTE(felipemonteiro): This is schema for microversion 2.53 which includes:
+#
+# * changing the service 'id' to 'string' type only
+# * adding update_service which supersedes enable_service, disable_service,
+# disable_log_reason, update_forced_down.
+
+list_services = copy.deepcopy(servicesv211.list_services)
+# The ID of the service is a uuid, so v2.1 pattern does not apply.
+list_services['response_body']['properties']['services']['items'][
+ 'properties']['id'] = {'type': 'string', 'format': 'uuid'}
+
+update_service = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'binary': {'type': 'string'},
+ 'disabled_reason': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'state': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated_at': parameter_types.date_time,
+ 'zone': {'type': 'string'},
+ 'forced_down': {'type': 'boolean'}
+ },
+ 'additionalProperties': False,
+ 'required': ['id', 'binary', 'disabled_reason', 'host',
+ 'state', 'status', 'updated_at', 'zone',
+ 'forced_down']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['service']
+ }
+}
+
+# **** Schemas unchanged in microversion 2.53 since microversion 2.11 ****
+# Note(felipemonteiro): Below are the schemas that are unchanged in this
+# microversion. We need to keep them in this file so that the service client
+# can select the right schema via its self.schema_versions_info mapping.
+enable_disable_service = copy.deepcopy(servicesv211.enable_disable_service)
+update_forced_down = copy.deepcopy(servicesv211.update_forced_down)
+disable_log_reason = copy.deepcopy(servicesv211.disable_log_reason)
diff --git a/tempest/lib/api_schema/response/compute/v2_61/__init__.py b/tempest/lib/api_schema/response/compute/v2_61/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_61/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_61/flavors.py b/tempest/lib/api_schema/response/compute/v2_61/flavors.py
new file mode 100644
index 0000000..381fb64
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_61/flavors.py
@@ -0,0 +1,102 @@
+# Copyright 2018 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_55 import flavors \
+ as flavorsv255
+
+# ****** Schemas changed in microversion 2.61 *****************
+
+# Note(gmann): This is schema for microversion 2.61 which includes the
+# Flavor extra_specs in the Response body of the following APIs:
+# - ``PUT /flavors/{flavor_id}``
+# - ``GET /flavors/detail``
+# - ``GET /flavors/{flavor_id}``
+# - ``POST /flavors``
+
+flavor_description = {
+ 'type': ['string', 'null'],
+ 'minLength': 0, 'maxLength': 65535
+}
+
+flavor_extra_specs = {
+ 'type': 'object',
+ 'patternProperties': {
+ '^[a-zA-Z0-9-_:. ]{1,255}$': {'type': 'string'}
+ }
+}
+
+common_flavor_info = {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'ram': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ # The 'swap' attribute comes back as an integer, but when it is empty
+ # it comes back as "", so its type is defined as both string and integer.
+ 'swap': {'type': ['integer', 'string']},
+ 'disk': {'type': 'integer'},
+ 'id': {'type': 'string'},
+ 'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'},
+ 'description': flavor_description,
+ 'extra_specs': flavor_extra_specs
+ },
+ 'additionalProperties': False,
+ # 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and
+ # 'OS-FLV-EXT-DATA' are API extensions. so they are not 'required'.
+ 'required': ['name', 'links', 'ram', 'vcpus', 'swap', 'disk', 'id',
+ 'description']
+}
+
+list_flavors_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavors': {
+ 'type': 'array',
+ 'items': common_flavor_info
+ },
+ # NOTE(gmann): The flavors_links attribute is not always present,
+ # so it is not 'required'.
+ 'flavors_links': parameter_types.links
+ },
+ 'additionalProperties': False,
+ 'required': ['flavors']
+ }
+}
+
+create_update_get_flavor_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor': common_flavor_info
+ },
+ 'additionalProperties': False,
+ 'required': ['flavor']
+ }
+}
+
+# ****** Schemas unchanged in microversion 2.61 since microversion 2.55 ***
+# Note(gmann): Below are the schemas that are unchanged in this microversion.
+# We need to keep them in this file so that the service client can select the
+# right schema via its self.schema_versions_info mapping.
+list_flavors = copy.deepcopy(flavorsv255.list_flavors)
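The ``flavor_extra_specs`` sub-schema above constrains extra-spec keys and
values through ``patternProperties``. A quick check with the standalone
``jsonschema`` package, purely as an illustration of how that pattern behaves
(not how Tempest wires up response validation)::

    import jsonschema

    flavor_extra_specs = {
        'type': 'object',
        'patternProperties': {
            '^[a-zA-Z0-9-_:. ]{1,255}$': {'type': 'string'}
        }
    }

    # A matching key with a string value validates cleanly.
    jsonschema.validate({'hw:cpu_policy': 'dedicated'}, flavor_extra_specs)

    # A non-string value for a matching key is rejected.
    try:
        jsonschema.validate({'hw:cpu_policy': 1}, flavor_extra_specs)
    except jsonschema.ValidationError:
        print('non-string extra spec rejected, as expected')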
diff --git a/tempest/lib/cli/output_parser.py b/tempest/lib/cli/output_parser.py
index a7d5e49..45d41c7 100644
--- a/tempest/lib/cli/output_parser.py
+++ b/tempest/lib/cli/output_parser.py
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-delimiter_line = re.compile('^\+\-[\+\-]+\-\+$')
+delimiter_line = re.compile(r'^\+\-[\+\-]+\-\+$')
def details_multiple(output_lines, with_label=False):
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index d1f0888..82fcd0b 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -358,5 +358,6 @@
"Run 'tox -v -e uuidgen' to automatically fix tests with\n"
"missing @decorators.idempotent_id decorators.")
+
if __name__ == '__main__':
run()
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 22276d4..bc9cfe2 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -416,6 +416,8 @@
resp_body=None, extra=None):
if 'X-Auth-Token' in req_headers:
req_headers['X-Auth-Token'] = '<omitted>'
+ if 'X-Subject-Token' in req_headers:
+ req_headers['X-Subject-Token'] = '<omitted>'
# A shallow copy is sufficient
resp_log = resp.copy()
if 'x-subject-token' in resp_log:
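The logging path now masks ``X-Subject-Token`` the same way it already masks
``X-Auth-Token``. A standalone restatement of the idea, not the method
itself::

    def mask_token_headers(headers):
        """Return a copy of ``headers`` with token values hidden."""
        masked = dict(headers)
        for name in ('X-Auth-Token', 'X-Subject-Token'):
            if name in masked:
                masked[name] = '<omitted>'
        return masked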
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index 4923d7e..2fad0a4 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -23,6 +23,8 @@
as schema_extra_specs
from tempest.lib.api_schema.response.compute.v2_55 import flavors \
as schemav255
+from tempest.lib.api_schema.response.compute.v2_61 import flavors \
+ as schemav261
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -31,7 +33,8 @@
schema_versions_info = [
{'min': None, 'max': '2.54', 'schema': schema},
- {'min': '2.55', 'max': None, 'schema': schemav255}]
+ {'min': '2.55', 'max': '2.60', 'schema': schemav255},
+ {'min': '2.61', 'max': None, 'schema': schemav261}]
def list_flavors(self, detail=False, **params):
"""Lists flavors.
@@ -202,7 +205,8 @@
"""
resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
(flavor_id, key))
- self.validate_response(schema.unset_flavor_extra_specs, resp, body)
+ self.validate_response(schema_extra_specs.unset_flavor_extra_specs,
+ resp, body)
return rest_client.ResponseBody(resp, body)
def list_flavor_access(self, flavor_id):
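``schema_versions_info`` maps the requested compute microversion onto the
schema module used for response validation. A rough, self-contained
approximation of that selection, not the actual ``get_schema``
implementation::

    SCHEMA_VERSIONS_INFO = [
        {'min': None, 'max': '2.54', 'schema': 'v2_1 flavors'},
        {'min': '2.55', 'max': '2.60', 'schema': 'v2_55 flavors'},
        {'min': '2.61', 'max': None, 'schema': 'v2_61 flavors'},
    ]


    def select_schema(requested, versions=SCHEMA_VERSIONS_INFO):
        """Return the schema whose [min, max] range contains ``requested``."""
        def as_tuple(version, default):
            if version is None:
                return default
            return tuple(int(part) for part in version.split('.'))

        wanted = as_tuple(requested, (2, 1))
        for entry in versions:
            low = as_tuple(entry['min'], (0, 0))
            high = as_tuple(entry['max'], (999, 999))
            if low <= wanted <= high:
                return entry['schema']

    # select_schema('2.61') -> 'v2_61 flavors'; select_schema('2.10') falls
    # back to the v2.1 schema because 2.10 <= 2.54.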
diff --git a/tempest/lib/services/compute/services_client.py b/tempest/lib/services/compute/services_client.py
index b046c35..d52de3a 100644
--- a/tempest/lib/services/compute/services_client.py
+++ b/tempest/lib/services/compute/services_client.py
@@ -20,6 +20,8 @@
from tempest.lib.api_schema.response.compute.v2_1 import services as schema
from tempest.lib.api_schema.response.compute.v2_11 import services \
as schemav211
+from tempest.lib.api_schema.response.compute.v2_53 import services \
+ as schemav253
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -28,7 +30,8 @@
schema_versions_info = [
{'min': None, 'max': '2.10', 'schema': schema},
- {'min': '2.11', 'max': None, 'schema': schemav211}]
+ {'min': '2.11', 'max': '2.52', 'schema': schemav211},
+ {'min': '2.53', 'max': None, 'schema': schemav253}]
def list_services(self, **params):
"""Lists all running Compute services for a tenant.
@@ -47,9 +50,30 @@
self.validate_response(_schema.list_services, resp, body)
return rest_client.ResponseBody(resp, body)
+ def update_service(self, service_id, **kwargs):
+ """Update a compute service.
+
+ Update a compute service to enable or disable scheduling, including
+ recording a reason why a compute service was disabled from scheduling.
+
+ This API is available starting with microversion 2.53.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/compute/#update-compute-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/%s' % service_id, put_body)
+ body = json.loads(body)
+ _schema = self.get_schema(self.schema_versions_info)
+ self.validate_response(_schema.update_service, resp, body)
+ return rest_client.ResponseBody(resp, body)
+
def enable_service(self, **kwargs):
"""Enable service on a host.
+ ``update_service`` supersedes this API starting with microversion 2.53.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#enable-scheduling-for-a-compute-service
@@ -63,6 +87,8 @@
def disable_service(self, **kwargs):
"""Disable service on a host.
+ ``update_service`` supersedes this API starting with microversion 2.53.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service
@@ -76,6 +102,8 @@
def disable_log_reason(self, **kwargs):
"""Disables scheduling for a Compute service and logs reason.
+ ``update_service`` supersedes this API starting with microversion 2.53.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service-and-log-disabled-reason
@@ -89,6 +117,8 @@
def update_forced_down(self, **kwargs):
"""Set or unset ``forced_down`` flag for the service.
+ ``update_service`` supersedes this API starting with microversion 2.53.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#update-forced-down
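A short usage sketch for the new ``update_service`` call. It assumes a
``services_client`` whose requests already carry compute microversion 2.53 or
later; the service id is simply whatever ``list_services`` returned::

    services = services_client.list_services()['services']
    service_id = services[0]['id']

    # From microversion 2.53 on, a single PUT replaces enable_service,
    # disable_service and disable_log_reason.
    services_client.update_service(service_id, status='disabled',
                                   disabled_reason='host maintenance')
    services_client.update_service(service_id, status='enabled')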
diff --git a/tempest/lib/services/network/agents_client.py b/tempest/lib/services/network/agents_client.py
index a0f832e..5068121 100644
--- a/tempest/lib/services/network/agents_client.py
+++ b/tempest/lib/services/network/agents_client.py
@@ -87,9 +87,11 @@
return self.delete_resource(uri)
def add_dhcp_agent_to_network(self, agent_id, **kwargs):
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526212
+ """Schedule a network to a DHCP agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#schedule-a-network-to-a-dhcp-agent
+ """
uri = '/agents/%s/dhcp-networks' % agent_id
return self.create_resource(uri, kwargs, expect_empty_body=True)
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
index 0fac6bd..1fde79f 100644
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
@@ -61,7 +61,7 @@
return rest_client.ResponseBody(resp, body)
def delete_encryption_type(self, volume_type_id):
- """Delete the encryption type for the specified volume-type."""
+ """Delete the encryption type for the specified volume type."""
resp, body = self.delete(
"/types/%s/encryption/provider" % volume_type_id)
self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v3/encryption_types_client.py b/tempest/lib/services/volume/v3/encryption_types_client.py
index 7443a87..03de187 100644
--- a/tempest/lib/services/volume/v3/encryption_types_client.py
+++ b/tempest/lib/services/volume/v3/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 09036a4..22155a9 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -20,9 +20,15 @@
class ServicesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Services API requests"""
def list_services(self, **params):
+ """List all Cinder services.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-all-cinder-services
+ """
url = 'os-services'
if params:
url += '?%s' % urllib.urlencode(params)
@@ -31,3 +37,66 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def enable_service(self, **kwargs):
+ """Enable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#enable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/enable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_service(self, **kwargs):
+ """Disable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#disable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_log_reason(self, **kwargs):
+ """Disable scheduling for a volume service and log disabled reason.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#log-disabled-cinder-service-information
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable-log-reason', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def freeze_host(self, **kwargs):
+ """Freeze a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#freeze-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/freeze', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
+
+ def thaw_host(self, **kwargs):
+ """Thaw a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#thaw-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/thaw', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
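A minimal usage sketch of the new volume service calls added above, using the illustrative host and binary values from the unit tests further down rather than a real deployment::

    # Disable a cinder-volume service with a logged reason, then re-enable it.
    services_client.disable_log_reason(host='fake-host',
                                       binary='cinder-volume',
                                       disabled_reason='maintenance')
    services_client.enable_service(host='fake-host', binary='cinder-volume')

    # Freeze and thaw a backend host; both calls return only the response
    # headers because the API replies with an empty body.
    services_client.freeze_host(host='fake-host@lvm')
    services_client.thaw_host(host='fake-host@lvm')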
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index 298925a..08e6c94 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -114,12 +114,12 @@
return rest_client.ResponseBody(resp, body)
def update_snapshot_status(self, snapshot_id, **kwargs):
- """Update the specified snapshot's status."""
- # TODO(gmann): api-site doesn't contain doc ref
- # for this API. After fixing the api-site, we need to
- # add the link here.
- # Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
+ """Update status of a snapshot.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-status-of-a-snapshot
+ """
post_body = json.dumps({'os-update_snapshot_status': kwargs})
url = 'snapshots/%s/action' % snapshot_id
resp, body = self.post(url, post_body)
@@ -176,11 +176,12 @@
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
- """Update metadata item for the snapshot."""
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
+ """Update metadata for the snapshot for a specific key.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-a-snapshot-s-metadata-for-a-specific-key
+ """
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.put(url, put_body)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 6d9d03a..13ecd15 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -21,7 +21,7 @@
class TypesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Types API requests"""
def is_resource_deleted(self, id):
try:
@@ -36,7 +36,7 @@
return 'volume-type'
def list_volume_types(self, **params):
- """List all the volume_types created.
+ """List all the volume types created.
For a full list of available parameters, please refer to the official
API reference:
@@ -52,7 +52,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type(self, volume_type_id):
- """Returns the details of a single volume_type.
+ """Returns the details of a single volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -78,7 +78,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type(self, volume_type_id):
- """Deletes the Specified Volume_type.
+ """Deletes the specified volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -89,11 +89,11 @@
return rest_client.ResponseBody(resp, body)
def list_volume_types_extra_specs(self, volume_type_id, **params):
- """List all the volume_types extra specs created.
+ """List all the volume type extra specs created.
- TODO: Current api-site doesn't contain this API description.
- After fixing the api-site, we need to fix here also for putting
- the link to api-site.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#show-all-extra-specifications-for-volume-type
"""
url = 'types/%s/extra_specs' % volume_type_id
if params:
@@ -105,7 +105,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
- """Returns the details of a single volume_type extra spec."""
+ """Returns the details of a single volume type extra spec."""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
resp, body = self.get(url)
body = json.loads(body)
@@ -113,10 +113,10 @@
return rest_client.ResponseBody(resp, body)
def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
- """Creates a new Volume_type extra spec.
+ """Creates new volume type extra specs.
- volume_type_id: Id of volume_type.
- extra_specs: A dictionary of values to be used as extra_specs.
+ :param volume_type_id: Id of volume type.
+ :param extra_specs: A dictionary of values to be used as extra_specs.
"""
url = "types/%s/extra_specs" % volume_type_id
post_body = json.dumps({'extra_specs': extra_specs})
@@ -126,7 +126,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
- """Deletes the Specified Volume_type extra spec."""
+ """Deletes the specified volume type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
volume_type_id, extra_spec_name))
self.expected_success(202, resp.status)
@@ -149,10 +149,10 @@
extra_specs):
"""Update a volume_type extra spec.
- volume_type_id: Id of volume_type.
- extra_spec_name: Name of the extra spec to be updated.
- extra_spec: A dictionary of with key as extra_spec_name and the
- updated value.
+ :param volume_type_id: Id of volume type.
+ :param extra_spec_name: Name of the extra spec to be updated.
+ :param extra_specs: A dictionary with key as extra_spec_name and the
+ updated value.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 9965fe5..c5d41a0 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -443,7 +443,9 @@
disk_format=img_disk_format,
properties=img_properties)
except IOError:
- LOG.debug("A qcow2 image was not found. Try to get a uec image.")
+ LOG.warning(
+ "A(n) %s image was not found. Retrying with uec image.",
+ img_disk_format)
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
@@ -807,8 +809,13 @@
return subnet
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], fixed_ip=ip_addr)['ports']
+ if ip_addr:
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'],
+ fixed_ips='ip_address=%s' % ip_addr)['ports']
+ else:
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'])['ports']
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
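Note that the ``fixed_ips`` filter above uses Neutron's ``key=value`` sub-filter syntax rather than a bare address; a rough sketch of the resulting call, with an illustrative IP::

    # Encoded roughly as
    # GET /v2.0/ports?device_id=<uuid>&fixed_ips=ip_address%3D10.0.0.5
    ports = self.os_admin.ports_client.list_ports(
        device_id=server['id'],
        fixed_ips='ip_address=10.0.0.5')['ports']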
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index bcd4ddb..8212e75 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -435,7 +435,6 @@
@testtools.skipIf(CONF.network.shared_physical_network,
'Connectivity can only be tested when in a '
'multitenant network environment')
- @decorators.skip_because(bug="1610994")
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_connectivity_between_vms_on_different_networks(self):
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index c23b564..8a8c54e 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
@@ -56,6 +57,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index a1c8c53..3ed8a10 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -17,6 +17,11 @@
import subprocess
import tempfile
+from mock import patch
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
from tempest.cmd import workspace
from tempest.lib.common.utils import data_utils
from tempest.tests import base
@@ -140,3 +145,42 @@
self.addCleanup(shutil.rmtree, path, ignore_errors=True)
self.workspace_manager.register_new_workspace(name, path)
self.assertIsNotNone(self.workspace_manager.get_workspace(name))
+
+ def test_workspace_name_not_exists(self):
+ nonexistent_name = data_utils.rand_uuid()
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager._name_exists,
+ nonexistent_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n" %
+ nonexistent_name)
+
+ def test_workspace_name_already_exists(self):
+ duplicate_name = self.name
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ _workspace_name_exists,
+ duplicate_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace already exists with name: %s.\n"
+ % duplicate_name)
+
+ def test_workspace_manager_path_not_exist(self):
+ fake_path = "fake_path"
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager._validate_path,
+ fake_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "Path does not exist.\n")
+
+ def test_workspace_manager_list_workspaces(self):
+ listed = self.workspace_manager.list_workspaces()
+ self.assertEqual(1, len(listed))
+ self.assertIn(self.name, listed)
+ self.assertEqual(self.path, listed.get(self.name))
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 739357b..1f0080f 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -77,7 +77,7 @@
def test_write_to_console_special_chars(self):
self._test_write_to_console_helper(
- '\`',
+ r'\`',
'sudo sh -c "echo \\"\\\\\\`\\" >/dev/console"')
self.conn.write_to_console('$')
self._assert_exec_called_with(
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4a2fff4..be54130 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -59,6 +59,7 @@
self._set_attrs()
self.lock_path = cfg.CONF.oslo_concurrency.lock_path
+
fake_service1_group = cfg.OptGroup(name='fake-service1', title='Fake service1')
FakeService1Group = [
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index b8385b2..a0267d0 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -88,7 +88,7 @@
def test_rand_url(self):
actual = data_utils.rand_url()
self.assertIsInstance(actual, str)
- self.assertRegex(actual, "^https://url-[0-9]*\.com$")
+ self.assertRegex(actual, r"^https://url-[0-9]*\.com$")
actual2 = data_utils.rand_url()
self.assertNotEqual(actual, actual2)
diff --git a/tempest/tests/lib/services/compute/test_services_client.py b/tempest/tests/lib/services/compute/test_services_client.py
index 2dd981c..ba432e3 100644
--- a/tempest/tests/lib/services/compute/test_services_client.py
+++ b/tempest/tests/lib/services/compute/test_services_client.py
@@ -56,6 +56,20 @@
}
}
+ FAKE_UPDATE_SERVICE = {
+ "service": {
+ "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339",
+ "binary": "nova-compute",
+ "disabled_reason": "test2",
+ "host": "host1",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "2012-10-29T13:42:05.000000",
+ "forced_down": False,
+ "zone": "nova"
+ }
+ }
+
def setUp(self):
super(TestServicesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -119,6 +133,28 @@
binary="controller",
disabled_reason='test reason')
+ def _test_update_service(self, bytes_body=False, status=None,
+ disabled_reason=None, forced_down=None):
+ resp_body = copy.deepcopy(self.FAKE_UPDATE_SERVICE)
+ kwargs = {}
+
+ if status is not None:
+ kwargs['status'] = status
+ if disabled_reason is not None:
+ kwargs['disabled_reason'] = disabled_reason
+ if forced_down is not None:
+ kwargs['forced_down'] = forced_down
+
+ resp_body['service'].update(kwargs)
+
+ self.check_service_client_function(
+ self.client.update_service,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ service_id=resp_body['service']['id'],
+ **kwargs)
+
def test_log_reason_disabled_service_with_str_body(self):
self._test_log_reason_disabled_service()
@@ -144,3 +180,36 @@
new_callable=mock.PropertyMock(return_value='2.11'))
def test_update_forced_down_with_bytes_body(self, _):
self._test_update_forced_down(bytes_body=True)
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_disable_scheduling_with_str_body(self, _):
+ self._test_update_service(status='disabled',
+ disabled_reason='maintenance')
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_disable_scheduling_with_bytes_body(self, _):
+ self._test_update_service(status='disabled',
+ disabled_reason='maintenance',
+ bytes_body=True)
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_enable_scheduling_with_str_body(self, _):
+ self._test_update_service(status='enabled')
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_enable_scheduling_with_bytes_body(self, _):
+ self._test_update_service(status='enabled', bytes_body=True)
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_forced_down_with_str_body(self, _):
+ self._test_update_service(forced_down=True)
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.53'))
+ def test_update_service_forced_down_with_bytes_body(self, _):
+ self._test_update_service(forced_down=True, bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_services_client.py b/tempest/tests/lib/services/volume/v3/test_services_client.py
new file mode 100644
index 0000000..f65228f
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_services_client.py
@@ -0,0 +1,214 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.services.volume.v3 import services_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServicesClient(base.BaseServiceTest):
+
+ FAKE_SERVICE_LIST = {
+ "services": [
+ {
+ "status": "enabled",
+ "binary": "cinder-backup",
+ "zone": "nova",
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:17.000000",
+ "host": "fake-host",
+ "disabled_reason": None
+ },
+ {
+ "status": "enabled",
+ "binary": "cinder-scheduler",
+ "zone": "nova",
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:24.000000",
+ "host": "fake-host",
+ "disabled_reason": None
+ },
+ {
+ "status": "enabled",
+ "binary": "cinder-volume",
+ "zone": "nova",
+ "frozen": False,
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:20.000000",
+ "host": "fake-host@lvm",
+ "replication_status": "disabled",
+ "active_backend_id": None,
+ "disabled_reason": None
+ }
+ ]
+ }
+
+ FAKE_SERVICE_REQUEST = {
+ "host": "fake-host",
+ "binary": "cinder-volume"
+ }
+
+ FAKE_SERVICE_RESPONSE = {
+ "disabled": False,
+ "status": "enabled",
+ "host": "fake-host@lvm",
+ "service": "",
+ "binary": "cinder-volume",
+ "disabled_reason": None
+ }
+
+ def setUp(self):
+ super(TestServicesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = services_client.ServicesClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_services(self, bytes_body=False,
+ mock_args='os-services', **params):
+ self.check_service_client_function(
+ self.client.list_services,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SERVICE_LIST,
+ to_utf=bytes_body,
+ mock_args=[mock_args],
+ **params)
+
+ def _test_enable_service(self, bytes_body=False):
+ resp_body = self.FAKE_SERVICE_RESPONSE
+ kwargs = self.FAKE_SERVICE_REQUEST
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for mock_args assert check.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.enable_service,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/enable', payload],
+ **kwargs)
+
+ def _test_disable_service(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+ resp_body.pop('disabled_reason')
+ resp_body['disabled'] = True
+ resp_body['status'] = 'disabled'
+ kwargs = self.FAKE_SERVICE_REQUEST
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for mock_args assert check.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.disable_service,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/disable', payload],
+ **kwargs)
+
+ def _test_disable_log_reason(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+ resp_body['disabled_reason'] = "disabled for test"
+ resp_body['disabled'] = True
+ resp_body['status'] = 'disabled'
+ kwargs = copy.deepcopy(self.FAKE_SERVICE_REQUEST)
+ kwargs.update({"disabled_reason": "disabled for test"})
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for mock_args assert check.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.disable_log_reason,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/disable-log-reason', payload],
+ **kwargs)
+
+ def _test_freeze_host(self, bytes_body=False):
+ kwargs = {'host': 'host1@lvm'}
+ self.check_service_client_function(
+ self.client.freeze_host,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ **kwargs)
+
+ def _test_thaw_host(self, bytes_body=False):
+ kwargs = {'host': 'host1@lvm'}
+ self.check_service_client_function(
+ self.client.thaw_host,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ **kwargs)
+
+ def test_list_services_with_str_body(self):
+ self._test_list_services()
+
+ def test_list_services_with_bytes_body(self):
+ self._test_list_services(bytes_body=True)
+
+ def test_list_services_with_params(self):
+ mock_args = 'os-services?host=fake-host'
+ self._test_list_services(mock_args=mock_args, host='fake-host')
+
+ def test_enable_service_with_str_body(self):
+ self._test_enable_service()
+
+ def test_enable_service_with_bytes_body(self):
+ self._test_enable_service(bytes_body=True)
+
+ def test_disable_service_with_str_body(self):
+ self._test_disable_service()
+
+ def test_disable_service_with_bytes_body(self):
+ self._test_disable_service(bytes_body=True)
+
+ def test_disable_log_reason_with_str_body(self):
+ self._test_disable_log_reason()
+
+ def test_disable_log_reason_with_bytes_body(self):
+ self._test_disable_log_reason(bytes_body=True)
+
+ def test_freeze_host_with_str_body(self):
+ self._test_freeze_host()
+
+ def test_freeze_host_with_bytes_body(self):
+ self._test_freeze_host(bytes_body=True)
+
+ def test_thaw_host_with_str_body(self):
+ self._test_thaw_host()
+
+ def test_thaw_host_with_bytes_body(self):
+ self._test_thaw_host(bytes_body=True)
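The NOTE comments in the new test module rely on a small property of ``json.dumps``: two dicts with the same keys but different insertion order serialize to different strings unless ``sort_keys=True`` is used on both sides of the comparison. A minimal illustration::

    import json

    a = {'host': 'fake-host', 'binary': 'cinder-volume'}
    b = {'binary': 'cinder-volume', 'host': 'fake-host'}

    json.dumps(a) == json.dumps(b)                                   # False
    json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)   # True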
diff --git a/tempest/tests/test_list_tests.py b/tempest/tests/test_list_tests.py
index 4af7463..1cc9c9a 100644
--- a/tempest/tests/test_list_tests.py
+++ b/tempest/tests/test_list_tests.py
@@ -34,7 +34,7 @@
"error on import %s" % ids)
ids = six.text_type(ids).split('\n')
for test_id in ids:
- if re.match('(\w+\.){3}\w+', test_id):
+ if re.match(r'(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
parts = test_id.partition('tempest')
fail_id = parts[1] + parts[2]
diff --git a/tools/check_logs.py b/tools/check_logs.py
index b80ccc0..de7e41d 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -96,7 +96,7 @@
def collect_url_logs(url):
page = urlreq.urlopen(url)
content = page.read()
- logs = re.findall('(screen-[\w-]+\.txt\.gz)</a>', content)
+ logs = re.findall(r'(screen-[\w-]+\.txt\.gz)</a>', content)
return logs
@@ -162,6 +162,7 @@
print("ok")
return 0
+
usage = """
Find non-white-listed log errors in log files from a devstack-gate run.
Log files will be searched for ERROR or CRITICAL messages. If any
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index bbb9019..4eb78fb 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -63,12 +63,13 @@
except HTTPError as err:
if err.code == 404:
return False
- p = re.compile('^tempest\.test_plugins', re.M)
+ p = re.compile(r'^tempest\.test_plugins', re.M)
if p.findall(r.read().decode('utf-8')):
return True
else:
return False
+
r = urllib.urlopen(url)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
diff --git a/tox.ini b/tox.ini
index da0233a..de4f1b7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,7 +19,7 @@
OS_STDOUT_CAPTURE=1
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=160
- PYTHONWARNINGS=default::DeprecationWarning
+ PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
install_command = pip install {opts} {packages}