Merge "Add Networks client unit tests"
diff --git a/.gitignore b/.gitignore
index 287db4c..7cb052f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@
dist
build
.testrepository
+.stestr
.idea
.project
.pydevproject
diff --git a/.stestr.conf b/.stestr.conf
new file mode 100644
index 0000000..e3201c1
--- /dev/null
+++ b/.stestr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_path=./tempest/test_discover
+group_regex=([^\.]*\.)*
+
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..ec6c59a
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,101 @@
+- job:
+    name: devstack-tempest
+    parent: devstack
+    description: Base Tempest job.
+    required-projects:
+      - openstack/tempest
+    timeout: 7200
+    roles:
+      - zuul: openstack-dev/devstack
+    vars:
+      devstack_services:
+        tempest: True
+    run: playbooks/devstack-tempest.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: tempest-tox-plugin-sanity-check
+    parent: tox
+    description: |
+      Run tempest plugin sanity check script using tox.
+    nodeset: ubuntu-xenial
+    vars:
+      tox_envlist: plugin-sanity-check
+    voting: false
+    timeout: 5000
+    required-projects:
+      - openstack/almanach
+      - openstack/aodh
+      - openstack/barbican-tempest-plugin
+      - openstack/ceilometer
+      - openstack/cinder
+      - openstack/congress
+      - openstack/designate-tempest-plugin
+      - openstack/ec2-api
+      - openstack/freezer
+      - openstack/freezer-api
+      - openstack/freezer-tempest-plugin
+      - openstack/gce-api
+      - openstack/glare
+      - openstack/heat
+      - openstack/intel-nfv-ci-tests
+      - openstack/ironic
+      - openstack/ironic-inspector
+      - openstack/keystone-tempest-plugin
+      - openstack/kingbird
+      - openstack/kuryr-tempest-plugin
+      - openstack/magnum
+      - openstack/magnum-tempest-plugin
+      - openstack/manila
+      - openstack/manila-tempest-plugin
+      - openstack/mistral
+      - openstack/mogan
+      - openstack/monasca-api
+      - openstack/monasca-log-api
+      - openstack/murano
+      - openstack/networking-bgpvpn
+      - openstack/networking-cisco
+      - openstack/networking-fortinet
+      - openstack/networking-generic-switch
+      - openstack/networking-l2gw
+      - openstack/networking-midonet
+      - openstack/networking-plumgrid
+      - openstack/networking-sfc
+      - openstack/neutron
+      - openstack/neutron-dynamic-routing
+      - openstack/neutron-fwaas
+      - openstack/neutron-lbaas
+      - openstack/neutron-tempest-plugin
+      - openstack/neutron-vpnaas
+      - openstack/nova-lxd
+      - openstack/novajoin-tempest-plugin
+      - openstack/octavia
+      - openstack/oswin-tempest-plugin
+      - openstack/panko
+      - openstack/patrole
+      - openstack/qinling
+      - openstack/requirements
+      - openstack/sahara-tests
+      - openstack/senlin
+      - openstack/senlin-tempest-plugin
+      - openstack/tap-as-a-service
+      - openstack/tempest-horizon
+      - openstack/trio2o
+      - openstack/trove
+      - openstack/valet
+      - openstack/vitrage
+      - openstack/vmware-nsx-tempest-plugin
+      - openstack/watcher-tempest-plugin
+      - openstack/zaqar-tempest-plugin
+      - openstack/zun-tempest-plugin
+
+- project:
+    name: openstack/tempest
+    check:
+      jobs:
+        - devstack-tempest:
+            files:
+              - ^playbooks/
+              - ^roles/
+              - ^.zuul.yaml$
+        - tempest-tox-plugin-sanity-check
diff --git a/HACKING.rst b/HACKING.rst
index dbb758b..57f0409 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -2,21 +2,21 @@
====================
- Step 1: Read the OpenStack Style Commandments
- http://docs.openstack.org/developer/hacking/
+ https://docs.openstack.org/hacking/latest/
- Step 2: Read on
Tempest Specific Commandments
------------------------------
- [T102] Cannot import OpenStack python clients in tempest/api &
- tempest/scenario tests
+ tempest/scenario tests
- [T104] Scenario tests require a services decorator
- [T105] Tests cannot use setUpClass/tearDownClass
- [T106] vim configuration should not be kept in source files.
- [T107] Check that a service tag isn't in the module path
- [T108] Check no hyphen at the end of rand_name() argument
- [T109] Cannot use testtools.skip decorator; instead use
- decorators.skip_because from tempest.lib
+ decorators.skip_because from tempest.lib
- [T110] Check that service client names of GET should be consistent
- [T111] Check that service client names of DELETE should be consistent
- [T112] Check that tempest.lib should not import local tempest code
@@ -24,6 +24,7 @@
- [T114] Check that tempest.lib does not use tempest config
- [T115] Check that admin tests should exist under admin path
- [N322] Method's default argument shouldn't be mutable
+- [T116] Unsupported 'message' Exception attribute in PY3
Test Data/Configuration
-----------------------
@@ -83,7 +84,7 @@
It is recommended to use testtools `matcher`_ for the more tricky assertions.
You can implement your own specific `matcher`_ as well.
-.. _matcher: http://testtools.readthedocs.org/en/latest/for-test-authors.html#matchers
+.. _matcher: https://testtools.readthedocs.org/en/latest/for-test-authors.html#matchers
If the test case fails you can see the related logs and the information
carried by the exception (exception class, backtrack and exception info).
@@ -102,20 +103,20 @@
Service Tagging
---------------
Service tagging is used to specify which services are exercised by a particular
-test method. You specify the services with the tempest.test.services decorator.
-For example:
+test method. You specify the services with the ``tempest.common.utils.services``
+decorator. For example:
-@services('compute', 'image')
+@utils.services('compute', 'image')
Valid service tag names are the same as the list of directories in tempest.api
that have tests.
-For scenario tests having a service tag is required. For the api tests service
-tags are only needed if the test method makes an api call (either directly or
+For scenario tests having a service tag is required. For the API tests service
+tags are only needed if the test method makes an API call (either directly or
indirectly through another service) that differs from the parent directory
-name. For example, any test that make an api call to a service other than nova
-in tempest.api.compute would require a service tag for those services, however
-they do not need to be tagged as compute.
+name. For example, any test that makes an API call to a service other than Nova
+in ``tempest.api.compute`` would require a service tag for those services,
+however it does not need to be tagged as ``compute``.
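+
+A minimal, illustrative sketch of a tagged test method (the class and test
+names are assumed, not taken from the real suite)::
+
+  from tempest.common import utils
+
+  class ImagesTest(base.BaseV2ComputeTest):
+
+      @utils.services('image')
+      def test_create_image_from_server(self):
+          # A compute test that also calls the image API carries an 'image'
+          # service tag; it does not need a 'compute' tag.
+          ...
+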
Test fixtures and resources
---------------------------
@@ -127,6 +128,12 @@
Test class level resources should be defined in the `resource_setup` method of
the test class, except for any credential obtained from the credentials
provider, which should be set-up in the `setup_credentials` method.
+Cleanup is best scheduled using `addClassResourceCleanup` which ensures that
+the cleanup code is always invoked, and in reverse order with respect to the
+creation order.
+
+In both cases - test level and class level cleanups - a wait loop should be
+scheduled before the actual delete of resources that are deleted asynchronously.
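+
+A minimal, illustrative sketch of class level cleanup (the helper names are
+assumed from the compute base test class and ``tempest.common.waiters``)::
+
+  @classmethod
+  def resource_setup(cls):
+      super(MyServersTest, cls).resource_setup()
+      cls.server = cls.create_test_server(wait_until='ACTIVE')
+      # Cleanups run in reverse order: the delete registered last runs
+      # first, followed by the wait for the asynchronous delete to finish.
+      cls.addClassResourceCleanup(
+          waiters.wait_for_server_termination,
+          cls.servers_client, cls.server['id'])
+      cls.addClassResourceCleanup(
+          cls.servers_client.delete_server, cls.server['id'])
+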
The test base class `BaseTestCase` defines Tempest framework for class level
fixtures. `setUpClass` and `tearDownClass` are defined here and cannot be
@@ -171,7 +178,7 @@
All negative tests should be based on `API-WG guideline`_ . Such negative
tests can block any changes from accurate failure code to invalid one.
-.. _API-WG guideline: http://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications
+.. _API-WG guideline: https://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications
If facing some gray area which is not clarified on the above guideline, propose
a new guideline to the API-WG. With a proposal to the API-WG we will be able to
@@ -198,7 +205,7 @@
Test skips because of Known Bugs
--------------------------------
If a test is broken because of a bug it is appropriate to skip the test until
-bug has been fixed. You should use the skip_because decorator so that
+the bug has been fixed. You should use the ``skip_because`` decorator so that
Tempest's skip tracking tool can watch the bug status.
Example::
@@ -229,7 +236,7 @@
require admin privileges are outside of projects.
- Races between methods in the same class are not a problem because
- parallelization in tempest is at the test class level, but if there is a json
+ parallelization in Tempest is at the test class level, but if there is a json
and xml version of the same test class there could still be a race between
methods.
@@ -238,8 +245,8 @@
avoided to prevent resource conflicts.
- If the execution of a set of tests is required to be serialized then locking
- can be used to perform this. See AggregatesAdminTest in
- tempest.api.compute.admin for an example of using locking.
+ can be used to perform this. See usage of ``LockFixture`` for examples of
+ using locking.
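+
+A minimal sketch of such locking (assuming ``tempest.common.tempest_fixtures``
+provides ``LockFixture``; the test class name is illustrative)::
+
+  from tempest.common import tempest_fixtures as fixtures
+
+  class AggregatesAdminTest(base.BaseV2ComputeAdminTest):
+
+      def setUp(self):
+          # Take an inter-process lock so tests touching the same
+          # availability zone are serialized across workers.
+          self.useFixture(fixtures.LockFixture('availability_zone'))
+          super(AggregatesAdminTest, self).setUp()
+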
Sample Configuration File
-------------------------
@@ -251,7 +258,7 @@
Unit Tests
----------
-Unit tests are a separate class of tests in tempest. They verify tempest
+Unit tests are a separate class of tests in Tempest. They verify Tempest
itself, and thus have a different set of guidelines around them:
1. They can not require anything running externally. All you should need to
@@ -321,8 +328,8 @@
Tempest.lib includes a ``check-uuid`` tool that will test for the existence
and uniqueness of idempotent_id metadata for every test. If you have
-tempest installed you run the tool against Tempest by calling from the
-tempest repo::
+Tempest installed you can run the tool against Tempest by calling it from the
+Tempest repo::
check-uuid
@@ -337,7 +344,7 @@
check-uuid --fix
-The ``check-uuid`` tool is used as part of the tempest gate job
+The ``check-uuid`` tool is used as part of the Tempest gate job
to ensure that all tests have an ``idempotent_id`` decorator.
Branchless Tempest Considerations
@@ -350,17 +357,17 @@
proposed commits to Tempest must work against both the master and all the
currently supported stable branches of the projects. As such there are a few
special considerations that have to be accounted for when pushing new changes
-to tempest.
+to Tempest.
1. New Tests for new features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When adding tests for new features that were not in previous releases of the
projects the new test has to be properly skipped with a feature flag. Whether
-this is just as simple as using the @test.requires_ext() decorator to check
-if the required extension (or discoverable optional API) is enabled or adding
-a new config option to the appropriate section. If there isn't a method of
-selecting the new **feature** from the config file then there won't be a
+this is just as simple as using the @utils.requires_ext() decorator to
+check if the required extension (or discoverable optional API) is enabled, or
+adding a new config option to the appropriate section. If there isn't a method
+of selecting the new **feature** from the config file then there won't be a
mechanism to disable the test with older stable releases and the new test won't
be able to merge.
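+
+A minimal, illustrative sketch of such a feature flag check (the extension
+name and the test are assumed, not real)::
+
+  from tempest.common import utils
+
+  @utils.requires_ext(extension='os-example-extension', service='compute')
+  def test_new_feature(self):
+      # Automatically skipped when the extension is not listed in the
+      # compute-feature-enabled api_extensions config option.
+      ...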
@@ -370,14 +377,22 @@
When trying to land a bug fix which changes a tested API you'll have to use the
following procedure::
- - Propose change to the project, get a +2 on the change even with failing
- - Propose skip on Tempest which will only be approved after the
+ 1. Propose change to the project, get a +2 on the change even with failing
+ 2. Propose skip on Tempest which will only be approved after the
corresponding change in the project has a +2 on change
- - Land project change in master and all open stable branches (if required)
- - Land changed test in Tempest
+ 3. Land project change in master and all open stable branches (if required)
+ 4. Land changed test in Tempest
Otherwise the bug fix won't be able to land in the project.
+Handily, `Zuul’s cross-repository dependencies
+<https://docs.openstack.org/infra/zuul/user/gating.html#cross-project-dependencies>`_
+can be leveraged to do without step 2 and to have steps 3 and 4 happen
+"atomically". To do that, make the patch written in step 1 depend (refer to
+Zuul's documentation above) on the patch written in step 4. The commit message
+for the Tempest change should have a link to the Gerrit review that justifies
+that change.
+
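+For example, the project side commit message might carry a dependency footer
+like the following (the change URL is illustrative only)::
+
+  Fix server rebuild response for the new API contract
+
+  Depends-On: https://review.openstack.org/123456
+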
3. New Tests for existing features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/README.rst b/README.rst
index ac93992..242f4d5 100644
--- a/README.rst
+++ b/README.rst
@@ -2,8 +2,8 @@
Team and repository tags
========================
-.. image:: http://governance.openstack.org/badges/tempest.svg
- :target: http://governance.openstack.org/reference/tags/index.html
+.. image:: https://governance.openstack.org/badges/tempest.svg
+ :target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
@@ -11,7 +11,7 @@
==============================================
The documentation for Tempest is officially hosted at:
-http://docs.openstack.org/developer/tempest/
+https://docs.openstack.org/tempest/latest/
This is a set of integration tests to be run against a live OpenStack
cluster. Tempest has batteries of tests for OpenStack API validation,
@@ -23,7 +23,7 @@
Tempest Design Principles that we strive to live by.
- Tempest should be able to run against any OpenStack cloud, be it a
- one node devstack install, a 20 node lxc cloud, or a 1000 node kvm
+ one node DevStack install, a 20 node LXC cloud, or a 1000 node KVM
cloud.
- Tempest should be explicit in testing features. It is easy to auto
discover features of a cloud incorrectly, and give people an
@@ -61,17 +61,17 @@
#. You first need to install Tempest. This is done with pip after you check out
the Tempest repo::
- $ git clone http://git.openstack.org/openstack/tempest
+ $ git clone https://git.openstack.org/openstack/tempest
$ pip install tempest/
This can be done within a venv, but the assumption for this guide is that
- the Tempest cli entry point will be in your shell's PATH.
+ the Tempest CLI entry point will be in your shell's PATH.
-#. Installing Tempest may create a /etc/tempest dir, however if one isn't
- created you can create one or use ~/.tempest/etc or ~/.config/tempest in
- place of /etc/tempest. If none of these dirs are created tempest will create
- ~/.tempest/etc when it's needed. The contents of this dir will always
- automatically be copied to all etc/ dirs in local workspaces as an initial
+#. Installing Tempest may create a ``/etc/tempest`` dir, however if one isn't
+ created you can create one or use ``~/.tempest/etc`` or ``~/.config/tempest`` in
+ place of ``/etc/tempest``. If none of these dirs are created Tempest will create
+ ``~/.tempest/etc`` when it's needed. The contents of this dir will always
+ automatically be copied to all ``etc/`` dirs in local workspaces as an initial
setup step. So if there is any common configuration you'd like to be shared
between local Tempest workspaces it's recommended that you pre-populate it
before running ``tempest init``.
@@ -90,12 +90,12 @@
is that you'll create a new working directory for each to maintain separate
configuration files and local artifact storage for each.
-#. Then cd into the newly created working dir and also modify the local
- config files located in the etc/ subdir created by the ``tempest init``
- command. Tempest is expecting a tempest.conf file in etc/ so if only a
+#. Then ``cd`` into the newly created working dir and also modify the local
+ config files located in the ``etc/`` subdir created by the ``tempest init``
+ command. Tempest is expecting a ``tempest.conf`` file in etc/ so if only a
sample exists you must rename or copy it to tempest.conf before making
any changes to it otherwise Tempest will not know how to load it. For
- details on configuring tempest refer to the :ref:`tempest-configuration`.
+ details on configuring Tempest refer to the :ref:`tempest-configuration`.
#. Once the configuration is done you're now ready to run Tempest. This can
be done using the :ref:`tempest_run` command. This can be done by either
@@ -117,15 +117,15 @@
will run the same set of tests as the default gate jobs.
.. _testr: https://testrepository.readthedocs.org/en/latest/MANUAL.html
-.. _ostestr: http://docs.openstack.org/developer/os-testr/
+.. _ostestr: https://docs.openstack.org/os-testr/latest/
Library
-------
Tempest exposes a library interface. This interface is a stable interface and
should be backwards compatible (including backwards compatibility with the
old tempest-lib package, with the exception of the import). If you plan to
-directly consume tempest in your project you should only import code from the
-tempest library interface, other pieces of tempest do not have the same
+directly consume Tempest in your project you should only import code from the
+Tempest library interface, other pieces of Tempest do not have the same
stable interface and there are no guarantees on the Python API unless otherwise
stated.
@@ -133,11 +133,11 @@
Release Versioning
------------------
-`Tempest Release Notes <http://docs.openstack.org/releasenotes/tempest>`_
+`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
shows what changes have been released on each version.
Tempest's released versions are broken into 2 sets of information. Depending on
-how you intend to consume tempest you might need
+how you intend to consume Tempest you might need
The version is a set of 3 numbers:
@@ -146,12 +146,12 @@
While this is almost `semver`_ like, the way versioning is handled is slightly
different:
-X is used to represent the supported OpenStack releases for tempest tests
-in-tree, and to signify major feature changes to tempest. It's a monotonically
+X is used to represent the supported OpenStack releases for Tempest tests
+in-tree, and to signify major feature changes to Tempest. It's a monotonically
increasing integer where each version either indicates a new supported OpenStack
release, the drop of support for an OpenStack release (which will coincide with
the upstream stable branch going EOL), or a major feature lands (or is removed)
-from tempest.
+from Tempest.
Y.Z is used to represent library interface changes. This is treated the same
way as minor and patch versions from `semver`_ but only for the library
@@ -166,16 +166,16 @@
Detailed configuration of Tempest is beyond the scope of this
document see :ref:`tempest-configuration` for more details on configuring
-Tempest. The etc/tempest.conf.sample attempts to be a self-documenting version
-of the configuration.
+Tempest. The ``etc/tempest.conf.sample`` attempts to be a self-documenting
+version of the configuration.
You can generate a new sample tempest.conf file, run the following
command from the top level of the Tempest directory::
$ tox -e genconfig
-The most important pieces that are needed are the user ids, openstack
-endpoint, and basic flavors and images needed to run tests.
+The most important pieces that are needed are the user ids, OpenStack
+endpoints, and basic flavors and images needed to run tests.
Unit Tests
----------
@@ -183,20 +183,39 @@
Tempest also has a set of unit tests which test the Tempest code itself. These
tests can be run by specifying the test discovery path::
- $ OS_TEST_PATH=./tempest/tests testr run --parallel
+ $ stestr --test-path ./tempest/tests run
-By setting OS_TEST_PATH to ./tempest/tests it specifies that test discover
-should only be run on the unit test directory. The default value of OS_TEST_PATH
-is OS_TEST_PATH=./tempest/test_discover which will only run test discover on the
+By setting the ``--test-path`` option to ``./tempest/tests`` test discovery
+is limited to the unit test directory. The default value of ``test_path``
+is ``test_path=./tempest/test_discover`` which will only run test discovery on the
Tempest suite.
-Alternatively, there are the py27 and py34 tox jobs which will run the unit
+Alternatively, there are the py27 and py35 tox jobs which will run the unit
tests with the corresponding version of python.
+One common activity is to just run a single test; you can do this with tox
+by pointing the py27 or py35 job at a single test::
+
+ $ tox -e py27 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+
+Or all tests in the test_microversions.py file::
+
+ $ tox -e py27 -- -n tempest.tests.test_microversions
+
+You may also use regular expressions to run any matching tests::
+
+ $ tox -e py27 -- test_microversions
+
+Additionally, when running a single test or test file, the ``-n/--no-discover``
+argument is no longer required; however, the run may be faster if it is included.
+
+For more information on these options and details about stestr, please see the
+`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+
Python 2.6
----------
-Starting in the kilo release the OpenStack services dropped all support for
+Starting in the Kilo release the OpenStack services dropped all support for
python 2.6. This change has been mirrored in Tempest, starting after the
tempest-2 tag. This means that proposed changes to Tempest which only fix
python 2.6 compatibility will be rejected, and moving forward more features not
@@ -208,8 +227,8 @@
Python 3.x
----------
-Starting during the Pike cycle Tempest has a gating CI job that runs tempest
-with Python 3. Any tempest release after 15.0.0 should fully support running
+Starting during the Pike cycle Tempest has a gating CI job that runs Tempest
+with Python 3. Any Tempest release after 15.0.0 should fully support running
under Python 3 as well as Python 2.7.
Legacy run method
@@ -239,10 +258,10 @@
.. note::
- If you have a running devstack environment, Tempest will be
+ If you have a running DevStack environment, Tempest will be
automatically configured and placed in ``/opt/stack/tempest``. It
will have a configuration file already set up to work with your
- devstack installation.
+ DevStack installation.
Tempest is not tied to any single test runner, but `testr`_ is the most commonly
used tool. Also, the nosetests test runner is **not** recommended to run Tempest.
diff --git a/REVIEWING.rst b/REVIEWING.rst
index 9b272bb..766d0c6 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -2,7 +2,7 @@
======================
To start read the `OpenStack Common Review Checklist
-<http://docs.openstack.org/infra/manual/developers.html#peer-review>`_
+<https://docs.openstack.org/infra/manual/developers.html#peer-review>`_
Ensuring code is executed
@@ -16,11 +16,20 @@
If a new test is added that depends on a new config option (like a feature
flag), the commit message must reference a change in DevStack or DevStack-Gate
that enables the execution of this newly introduced test. This reference could
-either be a `Cross-Repository Dependency <http://docs.openstack.org/infra/
+either be a `Cross-Repository Dependency <https://docs.openstack.org/infra/
manual/developers.html#cross-repository-dependencies>`_ or a simple link
to a Gerrit review.
+Execution time
+--------------
+While checking in the job logs that a new test is actually executed, also
+pay attention to the execution time of that test. Keep in mind that each test
+is going to be executed hundreds of times each day, because Tempest tests
+run in many OpenStack projects. It's worth weighing how important/critical
+the feature under test is against how costly the new test is.
+
+
Unit Tests
----------
@@ -48,6 +57,17 @@
abstract the duplicated code into a function or method.
+Tests overlap
+-------------
+When a new test is being proposed, consider whether the feature is already
+tested with Tempest. Tempest has more than 1200 tests, spread amongst many
+directories, so it's easy to introduce test duplication. For example, testing
+volume attachment to a server could be a compute test or a volume test, depending
+on how you see it. So one must look carefully in the entire code base for possible
+overlap. As a rule of thumb, the older a feature is, the more likely it's
+already tested.
+
+
Being explicit
--------------
When tests are being added that depend on a configurable feature or extension,
@@ -60,8 +80,8 @@
Configuration Options
---------------------
-With the introduction of the tempest external test plugin interface we needed
-to provide a stable contract for tempest's configuration options. This means
+With the introduction of the Tempest external test plugin interface we needed
+to provide a stable contract for Tempest's configuration options. This means
we can no longer simply remove a configuration option when it's no longer used.
Patches proposed that remove options without a deprecation cycle should not
be approved. Similarly when changing default values with configuration we need
@@ -91,7 +111,7 @@
anything backwards incompatible or would require a user to take note or do
something extra.
-.. _reno: http://docs.openstack.org/developer/reno/
+.. _reno: https://docs.openstack.org/reno/latest/
Deprecated Code
---------------
@@ -101,8 +121,8 @@
When to approve
---------------
- * Every patch needs two +2s before being approved.
- * Its ok to hold off on an approval until a subject matter expert reviews it
- * If a patch has already been approved but requires a trivial rebase to merge,
- you do not have to wait for a second +2, since the patch has already had
- two +2s.
+* Every patch needs two +2s before being approved.
+* It's OK to hold off on an approval until a subject matter expert reviews it
+* If a patch has already been approved but requires a trivial rebase to merge,
+ you do not have to wait for a second +2, since the patch has already had
+ two +2s.
diff --git a/bindep.txt b/bindep.txt
index 8914ade..efd3a10 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -1,5 +1,5 @@
# This file contains runtime (non-python) dependencies
-# More info at: http://docs.openstack.org/infra/bindep/readme.html
+# More info at: https://docs.openstack.org/infra/bindep/readme.html
libffi-dev [platform:dpkg]
libffi-devel [platform:rpm]
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
new file mode 100644
index 0000000..7745594
--- /dev/null
+++ b/doc/source/_extra/.htaccess
@@ -0,0 +1 @@
+redirectmatch 301 ^/developer/tempest/(.*) /tempest/latest/$1
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 23f732e..0a061b8 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -22,12 +22,8 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
import os
import subprocess
-import warnings
-
-import openstackdocstheme
# Build the plugin registry
def build_plugin_registry(app):
@@ -36,7 +32,8 @@
subprocess.call(['tools/generate-tempest-plugins-list.sh'], cwd=root_dir)
def setup(app):
-    app.connect('builder-inited', build_plugin_registry)
+    if os.getenv('GENERATE_TEMPEST_PLUGIN_LIST', 'true').lower() == 'true':
+        app.connect('builder-inited', build_plugin_registry)
@@ -157,14 +154,13 @@
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+# Add any paths that contain "extra" files, such as .htaccess or
+# robots.txt.
+html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = False
-
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 4accd94..e5f70d2 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -17,10 +17,10 @@
Tempest allows for configuring a set of admin credentials in the ``auth``
section, via the following parameters:
- #. ``admin_username``
- #. ``admin_password``
- #. ``admin_project_name``
- #. ``admin_domain_name``
+#. ``admin_username``
+#. ``admin_password``
+#. ``admin_project_name``
+#. ``admin_domain_name``
Admin credentials are not mandatory to run Tempest, but when provided they
can be used to:
@@ -28,7 +28,7 @@
- Run tests for admin APIs
- Generate test credentials on the fly (see `Dynamic Credentials`_)
-When keystone uses a policy that requires domain scoped tokens for admin
+When Keystone uses a policy that requires domain scoped tokens for admin
actions, the flag ``admin_domain_scope`` must be set to ``True``.
The admin user configured, if any, must have a role assigned to the domain to
be usable.
@@ -39,7 +39,7 @@
number of users available to run tests with.
You can specify the location of the file in the ``auth`` section in the
tempest.conf file. To see the specific format used in the file please refer to
-the accounts.yaml.sample file included in Tempest.
+the ``accounts.yaml.sample`` file included in Tempest.
Keystone Connection Info
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -47,18 +47,17 @@
to provide it with information about how it communicates with keystone.
This involves configuring the following options in the ``identity`` section:
- #. ``auth_version``
- #. ``uri``
- #. ``uri_v3``
+- ``auth_version``
+- ``uri``
+- ``uri_v3``
The ``auth_version`` option is used to tell Tempest whether it should be using
-keystone's v2 or v3 api for communicating with keystone. The two uri options are
+Keystone's v2 or v3 API for communicating with Keystone. The two uri options are
used to tell Tempest the url of the keystone endpoint. The ``uri`` option is
-used for keystone v2 request and ``uri_v3`` is used for keystone v3. You want to
+used for Keystone v2 requests and ``uri_v3`` is used for Keystone v3. You want to
ensure that which ever version you set for ``auth_version`` has its uri option
defined.
-
Credential Provider Mechanisms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -75,12 +74,12 @@
an admin user, and an alternate user. To enable and use dynamic credentials you
only need to configure two things:
- #. A set of admin credentials with permissions to create users and
- projects. This is specified in the ``auth`` section with the
- ``admin_username``, ``admin_project_name``, ``admin_domain_name`` and
- ``admin_password`` options
- #. To enable dynamic credentials in the ``auth`` section with the
- ``use_dynamic_credentials`` option.
+#. A set of admin credentials with permissions to create users and
+ projects. This is specified in the ``auth`` section with the
+ ``admin_username``, ``admin_project_name``, ``admin_domain_name`` and
+ ``admin_password`` options
+#. To enable dynamic credentials in the ``auth`` section with the
+ ``use_dynamic_credentials`` option.
This is also currently the default credential provider enabled by Tempest, due
to its common use and ease of configuration.
@@ -95,7 +94,7 @@
When the ``admin_domain_scope`` option is set to ``True``, provisioned admin
accounts will be assigned a role on domain configured in
``default_credentials_domain_name``. This will make the accounts provisioned
-usable in a cloud where domain scoped tokens are required by keystone for
+usable in a cloud where domain scoped tokens are required by Keystone for
admin operations. Note that the initial pre-provision admin accounts,
configured in tempest.conf, must have a role on the same domain as well, for
Dynamic Credentials to work.
@@ -116,21 +115,21 @@
To enable and use locking test accounts you need to do a few things:
- #. Create an accounts.yaml file which contains the set of pre-existing
- credentials to use for testing. To make sure you don't have a credentials
- starvation issue when running in parallel make sure you have at least two
- times the number of worker processes you are using to execute Tempest
- available in the file. (If running serially the worker count is 1.)
+#. Create an accounts.yaml file which contains the set of pre-existing
+ credentials to use for testing. To make sure you don't have a credentials
+ starvation issue when running in parallel make sure you have at least two
+ times the number of worker processes you are using to execute Tempest
+ available in the file. (If running serially the worker count is 1.)
- You can check the accounts.yaml.sample file packaged in Tempest for the yaml
- format.
- #. Provide Tempest with the location of your accounts.yaml file with the
- ``test_accounts_file`` option in the ``auth`` section
+ You can check the accounts.yaml.sample file packaged in Tempest for the yaml
+ format.
+#. Provide Tempest with the location of your accounts.yaml file with the
+ ``test_accounts_file`` option in the ``auth`` section
- *NOTE: Be sure to use a full path for the file; otherwise Tempest will
- likely not find it.*
+ *NOTE: Be sure to use a full path for the file; otherwise Tempest will
+ likely not find it.*
- #. Set ``use_dynamic_credentials = False`` in the ``auth`` group
+#. Set ``use_dynamic_credentials = False`` in the ``auth`` group
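+
+An entry in ``accounts.yaml`` typically looks similar to the following
+(illustrative only; see the ``accounts.yaml.sample`` file for the
+authoritative format)::
+
+  - username: 'user_1'
+    project_name: 'project_1'
+    password: 'secretpass'
+  - username: 'user_2'
+    project_name: 'project_2'
+    password: 'secretpass'
+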
It is worth pointing out that each set of credentials in the accounts.yaml
should have a unique project. This is required to provide proper isolation
@@ -140,10 +139,10 @@
tenants it's using are empty and may sporadically fail if there are unexpected
resources present.
-When the keystone in the target cloud requires domain scoped tokens to
+When the Keystone in the target cloud requires domain scoped tokens to
perform admin actions, all pre-provisioned admin users must have a role
assigned on the domain where test accounts are provisioned.
-The option ``admin_domain_scope`` is used to tell tempest that domain scoped
+The option ``admin_domain_scope`` is used to tell Tempest that domain scoped
tokens shall be used. ``default_credentials_domain_name`` is the domain where
test accounts are expected to be provisioned if no domain is specified.
@@ -163,11 +162,11 @@
can use to boot the servers with. There are two options in the Tempest config
for doing this:
- #. ``flavor_ref``
- #. ``flavor_ref_alt``
+#. ``flavor_ref``
+#. ``flavor_ref_alt``
Both of these options are in the ``compute`` section of the config file and take
-in the flavor id (not the name) from nova. The ``flavor_ref`` option is what
+in the flavor id (not the name) from Nova. The ``flavor_ref`` option is what
will be used for booting almost all of the guests; ``flavor_ref_alt`` is only
used in tests where two different-sized servers are required (for example, a
resize test).
@@ -182,10 +181,10 @@
Just like with flavors, Tempest needs to know which images to use for booting
servers. There are two options in the compute section just like with flavors:
- #. ``image_ref``
- #. ``image_ref_alt``
+#. ``image_ref``
+#. ``image_ref_alt``
-Both options are expecting an image id (not name) from nova. The ``image_ref``
+Both options are expecting an image id (not name) from Nova. The ``image_ref``
option is what will be used for booting the majority of servers in Tempest.
``image_ref_alt`` is used for tests that require two images such as rebuild. If
two images are not available you can set both options to the same image id and
@@ -193,13 +192,13 @@
There are also options in the ``scenario`` section for images:
- #. ``img_file``
- #. ``img_dir``
- #. ``aki_img_file``
- #. ``ari_img_file``
- #. ``ami_img_file``
- #. ``img_container_format``
- #. ``img_disk_format``
+#. ``img_file``
+#. ``img_dir``
+#. ``aki_img_file``
+#. ``ari_img_file``
+#. ``ami_img_file``
+#. ``img_container_format``
+#. ``img_disk_format``
However, unlike the other image options, these are used for a very small subset
of scenario tests which are uploading an image. These options are used to tell
@@ -224,7 +223,7 @@
Networking
----------
OpenStack has a myriad of different networking configurations possible and
-depending on which of the two network backends, nova-network or neutron, you are
+depending on which of the two network backends, nova-network or Neutron, you are
using things can vary drastically. Due to this complexity Tempest has to provide
a certain level of flexibility in its configuration to ensure it will work
against any cloud. This ends up causing a large number of permutations in
@@ -262,7 +261,7 @@
To set a fixed network name simply:
- #. Set the ``fixed_network_name`` option in the ``compute`` group
+#. Set the ``fixed_network_name`` option in the ``compute`` group
In the case that the configured fixed network name can not be found by a user
network list call, it will be treated like one was not provided except that a
@@ -302,21 +301,21 @@
server with multiple networks. If this is not the case for your cloud then using
an accounts file is recommended because it provides the necessary flexibility to
describe your configuration. Dynamic credentials is not able to dynamically
-allocate things as necessary if neutron is not enabled.
+allocate things as necessary if Neutron is not enabled.
-With neutron and dynamic credentials enabled there should not be any additional
+With Neutron and dynamic credentials enabled there should not be any additional
configuration necessary to enable Tempest to create servers with working
networking, assuming you have properly configured the ``network`` section to
-work for your cloud. Tempest will dynamically create the neutron resources
+work for your cloud. Tempest will dynamically create the Neutron resources
necessary to enable using servers with that network. Also, just as with the
-accounts file, if you specify a fixed network name while using neutron and
+accounts file, if you specify a fixed network name while using Neutron and
dynamic credentials it will enable running tests which require a static network
and it will additionally be used as a fallback for server creation. However,
unlike accounts.yaml this should never be triggered.
However, there is an option ``create_isolated_networks`` to disable dynamic
credentials's automatic provisioning of network resources. If this option is set
-to False you will have to either rely on there only being a single/default
+to ``False`` you will have to either rely on there only being a single/default
network available for the server creation, or use ``fixed_network_name`` to
inform Tempest which network to use.
@@ -330,22 +329,22 @@
To enable remote access to servers, there are 3 options at a minimum that are used:
- #. ``run_validation``
- #. ``connect_method``
- #. ``auth_method``
+#. ``run_validation``
+#. ``connect_method``
+#. ``auth_method``
The ``run_validation`` is used to enable or disable ssh connectivity for
all tests (with the exception of scenario tests which do not have a flag for
-enabling or disabling ssh) To enable ssh connectivity this needs be set to ``true``.
+enabling or disabling ssh). To enable ssh connectivity this needs to be set to ``True``.
-The ``connect_method`` option is used to tell tempest what kind of IP to use for
+The ``connect_method`` option is used to tell Tempest what kind of IP to use for
establishing a connection to the server. Two methods are available: ``fixed``
and ``floating``, the latter being set by default. If this is set to floating
-tempest will create a floating ip for the server before attempted to connect
+Tempest will create a floating IP for the server before attempting to connect
to it. The IP for the floating ip is what is used for the connection.
For the ``auth_method`` option there is currently, only one valid option,
-``keypair``. With this set to ``keypair`` tempest will create an ssh keypair
+``keypair``. With this set to ``keypair`` Tempest will create an ssh keypair
and use that for authenticating against the created server.
Configuring Available Services
@@ -359,8 +358,8 @@
The ``service_available`` section of the config file is used to set which
services are available. It contains a boolean option for each service (except
-for keystone which is a hard requirement) set it to True if the service is
-available or False if it is not.
+for Keystone which is a hard requirement); set it to ``True`` if the service is
+available or ``False`` if it is not.
Service Catalog
^^^^^^^^^^^^^^^
@@ -371,9 +370,9 @@
service catalog. There are three options for each service section to accomplish
this:
- #. ``catalog_type``
- #. ``endpoint_type``
- #. ``region``
+#. ``catalog_type``
+#. ``endpoint_type``
+#. ``region``
Setting ``catalog_type`` and ``endpoint_type`` should normally give Tempest
enough information to determine which endpoint it should pull from the service
@@ -382,17 +381,18 @@
service you can set the ``region`` option in that service's section.
It should also be noted that the default values for these options are set
-to what devstack uses (which is a de facto standard for service catalog
+to what DevStack uses (which is a de facto standard for service catalog
entries). So often nothing actually needs to be set on these options to enable
communication to a particular service. It is only if you are either not using
-the same ``catalog_type`` as devstack or you want Tempest to talk to a different
-endpoint type instead of publicURL for a service that these need to be changed.
+the same ``catalog_type`` as DevStack or you want Tempest to talk to a different
+endpoint type instead of ``publicURL`` for a service that these need to be
+changed.
.. note::
Tempest does not serve all kinds of fancy URLs in the service catalog. The
service catalog should be in a standard format (which is going to be
- standardized at the keystone level).
+ standardized at the Keystone level).
Tempest expects URLs in the Service catalog in the following format:
* ``http://example.com:1234/<version-info>``
@@ -420,7 +420,7 @@
configured as to which optional features are enabled. This is in order to
prevent bugs in the discovery mechanisms from masking failures.
-The service feature-enabled config sections are how Tempest addresses the
+The service ``feature-enabled`` config sections are how Tempest addresses the
optional feature question. Each service that has tests for optional features
contains one of these sections. The only options in it are boolean options
with the name of a feature which is used. If it is set to false any test which
@@ -431,7 +431,7 @@
API Extensions
^^^^^^^^^^^^^^
The service feature-enabled sections often contain an ``api-extensions`` option
-(or in the case of swift a ``discoverable_apis`` option). This is used to tell
+(or in the case of Swift a ``discoverable_apis`` option). This is used to tell
Tempest which api extensions (or configurable middleware) is used in your
deployment. It has two valid config states: either it contains a single value
``all`` (which is the default) which means that every api extension is assumed
diff --git a/data/tempest-plugins-registry.header b/doc/source/data/tempest-plugins-registry.header
similarity index 100%
rename from data/tempest-plugins-registry.header
rename to doc/source/data/tempest-plugins-registry.header
diff --git a/doc/source/library.rst b/doc/source/library.rst
index 29248d1..14415ae 100644
--- a/doc/source/library.rst
+++ b/doc/source/library.rst
@@ -44,9 +44,9 @@
existing interfaces we have to be careful to make sure we don't break any
external consumers. Some common red flags are:
- * a change to an existing API requires a change outside the library directory
- where the interface is being consumed
- * a unit test has to be significantly changed to make the proposed change pass
+* a change to an existing API requires a change outside the library directory
+ where the interface is being consumed
+* a unit test has to be significantly changed to make the proposed change pass
Testing
'''''''
@@ -68,3 +68,5 @@
library/api_microversion_testing
library/auth
library/clients
+ library/credential_providers
+ library/validation_resources
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
new file mode 100644
index 0000000..d96c97a
--- /dev/null
+++ b/doc/source/library/credential_providers.rst
@@ -0,0 +1,147 @@
+.. _cred_providers:
+
+Credential Providers
+====================
+
+These library interfaces are used to deal with allocating credentials on demand
+either dynamically by calling keystone to allocate new credentials, or from
+a list of preprovisioned credentials. These 2 modules are implementations of
+the same abstract credential providers class and can be used interchangeably.
+However, each implementation has some additional parameters that are used to
+influence the behavior of the modules. The API reference at the bottom of this
+doc shows the interface definitions for both modules, however that may be a bit
+opaque. You can see some examples of how to leverage this interface below.
+
+Initialization Example
+----------------------
+This example is from Tempest itself (from ``tempest/common/credentials_factory.py``,
+just modified slightly) and shows how it initializes the credential provider based
+on config::
+
+  from tempest import config
+  from tempest.lib.common import dynamic_creds
+  from tempest.lib.common import preprov_creds
+
+  CONF = config.CONF
+
+  def get_credentials_provider(name, network_resources=None,
+                               force_tenant_isolation=False,
+                               identity_version=None):
+      # If a test requires a new account to work, it can have it via forcing
+      # dynamic credentials. A new account will be produced only for that test.
+      # In case admin credentials are not available for the account creation,
+      # the test should be skipped else it would fail.
+      identity_version = identity_version or CONF.identity.auth_version
+      if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
+          admin_creds = get_configured_admin_credentials(
+              fill_in=True, identity_version=identity_version)
+          return dynamic_creds.DynamicCredentialProvider(
+              name=name,
+              network_resources=network_resources,
+              identity_version=identity_version,
+              admin_creds=admin_creds,
+              identity_admin_domain_scope=CONF.identity.admin_domain_scope,
+              identity_admin_role=CONF.identity.admin_role,
+              extra_roles=CONF.auth.tempest_roles,
+              neutron_available=CONF.service_available.neutron,
+              project_network_cidr=CONF.network.project_network_cidr,
+              project_network_mask_bits=CONF.network.project_network_mask_bits,
+              public_network_id=CONF.network.public_network_id,
+              create_networks=(CONF.auth.create_isolated_networks and not
+                               CONF.network.shared_physical_network),
+              resource_prefix=CONF.resources_prefix,
+              credentials_domain=CONF.auth.default_credentials_domain_name,
+              admin_role=CONF.identity.admin_role,
+              identity_uri=CONF.identity.uri_v3,
+              identity_admin_endpoint_type=CONF.identity.v3_endpoint_type)
+      else:
+          if CONF.auth.test_accounts_file:
+              # Most params are not relevant for pre-created accounts
+              return preprov_creds.PreProvisionedCredentialProvider(
+                  name=name, identity_version=identity_version,
+                  accounts_lock_dir=lockutils.get_lock_path(CONF),
+                  test_accounts_file=CONF.auth.test_accounts_file,
+                  object_storage_operator_role=CONF.object_storage.operator_role,
+                  object_storage_reseller_admin_role=reseller_admin_role,
+                  credentials_domain=CONF.auth.default_credentials_domain_name,
+                  admin_role=CONF.identity.admin_role,
+                  identity_uri=CONF.identity.uri_v3,
+                  identity_admin_endpoint_type=CONF.identity.v3_endpoint_type)
+          else:
+              raise exceptions.InvalidConfiguration(
+                  'A valid credential provider is needed')
+
+This function just returns an initialized credential provider class based on the
+config file. The consumer of this function treats the output as the same
+regardless of whether it's a dynamic or preprovisioned provider object.
+
+Dealing with Credentials
+------------------------
+
+Once you have a credential provider object created the access patterns for
+allocating and removing credentials are the same across both the dynamic
+and preprovisioned credentials. These are defined in the abstract
+CredentialProvider class. At a high level the credentials provider enables
+you to get 3 basic types of credentials at once (per object): a primary, alt,
+and admin. You're also able to allocate a credential by role. These credentials
+are tracked by the provider object, and cleanup must be invoked manually (see
+``clear_creds()`` below), otherwise the created resources will not be deleted
+(or returned to the pool in the case of preprovisioned creds).
+
+Examples
+''''''''
+
+Continuing from the example above, to allocate credentials by the 3 basic types
+you can do the following::
+
+ provider = get_credentials_provider('my_tests')
+ primary_creds = provider.get_primary_creds()
+ alt_creds = provider.get_alt_creds()
+ admin_creds = provider.get_admin_creds()
+ # Make sure to delete the credentials when you're finished
+ provider.clear_creds()
+
+To create and interact with credentials by role you can do the following::
+
+ provider = get_credentials_provider('my_tests')
+ my_role_creds = provider.get_creds_by_role({'roles': ['my_role']})
+ # provider.clear_creds() will clear all creds including those allocated by
+ # role
+ provider.clear_creds()
+
+When multiple roles are specified a set of creds with all the roles assigned
+will be allocated::
+
+ provider = get_credentials_provider('my_tests')
+ my_role_creds = provider.get_creds_by_role({'roles': ['my_role',
+ 'my_other_role']})
+ # provider.clear_creds() will clear all creds including those allocated by
+ # role
+ provider.clear_creds()
+
+If you need multiple sets of credentials with the same roles you can also do
+this by leveraging the ``force_new`` kwarg::
+
+ provider = get_credentials_provider('my_tests')
+ my_role_creds = provider.get_creds_by_role({'roles': ['my_role']})
+ my_role_other_creds = provider.get_creds_by_role({'roles': ['my_role']},
+ force_new=True)
+ # provider.clear_creds() will clear all creds including those allocated by
+ # role
+ provider.clear_creds()
+
+
+API Reference
+-------------
+
+The dynamic credentials module
+''''''''''''''''''''''''''''''
+
+.. automodule:: tempest.lib.common.dynamic_creds
+ :members:
+
+The pre-provisioned credentials module
+''''''''''''''''''''''''''''''''''''''
+
+.. automodule:: tempest.lib.common.preprov_creds
+ :members:
diff --git a/doc/source/library/validation_resources.rst b/doc/source/library/validation_resources.rst
new file mode 100644
index 0000000..9b36476
--- /dev/null
+++ b/doc/source/library/validation_resources.rst
@@ -0,0 +1,11 @@
+.. _validation_resources:
+
+Validation Resources
+====================
+
+-------------------------------
+The validation_resources module
+-------------------------------
+
+.. automodule:: tempest.lib.common.validation_resources
+ :members:
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index d6d90ba..7189312 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -33,46 +33,46 @@
Tempest will cover only integration testing of applicable microversions with
below exceptions:
- #. Test covers a feature which is important for interoperability. This covers tests requirement
- from Defcore.
- #. Test needed to fill Schema gaps.
- Tempest validates API responses with defined JSON schema. API responses can be different on
- each microversion and the JSON schemas need to be defined separately for the microversion.
- While implementing new integration tests for a specific microversion, there
- may be a gap in the JSON schemas (caused by previous microversions) implemented
- in Tempest.
- Filling that gap while implementing the new integration test cases is not efficient due to
- many reasons:
+#. Test covers a feature which is important for interoperability. This covers test requirements
+ from Defcore.
+#. Test needed to fill Schema gaps.
+ Tempest validates API responses with defined JSON schema. API responses can be different on
+ each microversion and the JSON schemas need to be defined separately for the microversion.
+ While implementing new integration tests for a specific microversion, there
+ may be a gap in the JSON schemas (caused by previous microversions) implemented
+ in Tempest.
+ Filling that gap while implementing the new integration test cases is not efficient due to
+ many reasons:
- * Hard to review
- * Sync between multiple integration tests patches which try to fill the same schema gap at same
- time
- * Might delay the microversion change on project side where project team wants Tempest
- tests to verify the results.
+ * Hard to review
+ * Sync between multiple integration tests patches which try to fill the same schema gap at same
+ time
+ * Might delay the microversion change on project side where project team wants Tempest
+ tests to verify the results.
- Tempest will allow to fill the schema gaps at the end of each cycle, or more
- often if required.
- Schema gap can be filled with testing those with a minimal set of tests. Those
- tests might not be integration tests and might be already covered on project
- side also.
- This exception is needed because:
+ Tempest will allow the schema gaps to be filled at the end of each cycle, or more
+ often if required.
+ A schema gap can be filled by testing it with a minimal set of tests. Those
+ tests might not be integration tests and might be already covered on project
+ side also.
+ This exception is needed because:
- * Allow to create microversion response schema in Tempest at the same time that projects are
- implementing their API microversions. This will make implementation easier for adding
- required tests before a new microversion change can be merged in the corresponding project
- and hence accelerate the development of microversions.
- * New schema must be verified by at least one test case which exercises such schema.
+ * Allow creating microversion response schemas in Tempest at the same time that projects are
+ implementing their API microversions. This will make implementation easier for adding
+ required tests before a new microversion change can be merged in the corresponding project
+ and hence accelerate the development of microversions.
+ * New schema must be verified by at least one test case which exercises such schema.
- For example:
- If any projects implemented 4 API microversion say- v2.3, v2.4, v2.5, v2.6
- Assume microversion v2.3, v2.4, v2.6 change the API Response which means Tempest
- needs to add JSON schema for v2.3, v2.4, v2.6.
- In that case if only 1 or 2 tests can verify all new schemas then we do not need
- separate tests for each new schemas. In worst case, we have to add 3 separate tests.
- #. Test covers service behavior at large scale with involvement of more deep layer like hypervisor
- etc not just API/DB layer. This type of tests will be added case by case basis and
- with project team consultation about why it cannot be covered on project side and worth to test
- in Tempest.
+ For example:
+ If any project implemented 4 API microversions, say v2.3, v2.4, v2.5 and v2.6,
+ and assume microversions v2.3, v2.4 and v2.6 change the API response, Tempest
+ needs to add JSON schemas for v2.3, v2.4 and v2.6.
+ In that case, if only 1 or 2 tests can verify all the new schemas then we do not need
+ separate tests for each new schema. In the worst case, we have to add 3 separate tests.
+#. Test covers service behavior at large scale with involvement of deeper layers such as the
+ hypervisor, not just the API/DB layer. This type of test will be added on a case by case basis,
+ in consultation with the project team, about why it cannot be covered on the project side and
+ why it is worth testing in Tempest.
Project Scope For Microversion Testing
""""""""""""""""""""""""""""""""""""""
@@ -111,7 +111,7 @@
This document explains how to implement Microversion tests using those
interfaces.
-.. _API Microversion testing Framework: http://docs.openstack.org/developer/tempest/library/api_microversion_testing.html
+.. _API Microversion testing Framework: https://docs.openstack.org/tempest/latest/library/api_microversion_testing.html
Step1: Add skip logic based on configured Microversion range
@@ -294,48 +294,72 @@
* Compute
- * `2.1`_
+ * `2.1`_
- .. _2.1: http://docs.openstack.org/developer/nova/api_microversion_history.html#id1
+ .. _2.1: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id1
- * `2.2`_
+ * `2.2`_
- .. _2.2: http://docs.openstack.org/developer/nova/api_microversion_history.html#id2
+ .. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
- * `2.10`_
+ * `2.10`_
- .. _2.10: http://docs.openstack.org/developer/nova/api_microversion_history.html#id9
+ .. _2.10: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id9
- * `2.20`_
+ * `2.20`_
- .. _2.20: http://docs.openstack.org/developer/nova/api_microversion_history.html#id18
+ .. _2.20: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id18
- * `2.25`_
+ * `2.25`_
- .. _2.25: http://docs.openstack.org/developer/nova/api_microversion_history.html#maximum-in-mitaka
+ .. _2.25: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-mitaka
- * `2.32`_
+ * `2.32`_
- .. _2.32: http://docs.openstack.org/developer/nova/api_microversion_history.html#id29
+ .. _2.32: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id29
- * `2.37`_
+ * `2.37`_
- .. _2.37: http://docs.openstack.org/developer/nova/api_microversion_history.html#id34
+ .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
- * `2.42`_
+ * `2.42`_
- .. _2.42: http://docs.openstack.org/developer/nova/api_microversion_history.html#maximum-in-ocata
+ .. _2.42: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-ocata
- * `2.47`_
+ * `2.47`_
- .. _2.47: http://docs.openstack.org/developer/nova/api_microversion_history.html#id42
+ .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
- * `2.48`_
+ * `2.48`_
- .. _2.48: http://docs.openstack.org/developer/nova/api_microversion_history.html#id43
+ .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
* Volume
- * `3.3`_
+ * `3.3`_
- .. _3.3: https://docs.openstack.org/developer/cinder/devref/api_microversion_history.html#id4
+ .. _3.3: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id3
+
+ * `3.9`_
+
+ .. _3.9: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id9
+
+ * `3.11`_
+
+ .. _3.11: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id11
+
+ * `3.12`_
+
+ .. _3.12: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id12
+
+ * `3.14`_
+
+ .. _3.14: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id14
+
+ * `3.19`_
+
+ .. _3.19: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id18
+
+ * `3.20`_
+
+ .. _3.20: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id19
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index b3af92f..2afb1e5 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -28,6 +28,9 @@
* tempest.lib.*
* tempest.config
* tempest.test_discover.plugins
+* tempest.common.credentials_factory
+* tempest.clients
+* tempest.test
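For illustration, a plugin test module that relies only on the interfaces listed above could
look roughly like this (the class name and the ``widgets_client`` attribute are hypothetical,
standing in for a client the plugin registers itself)::

    from tempest import config
    from tempest import test

    CONF = config.CONF


    class MyPluginBaseTest(test.BaseTestCase):

        credentials = ['primary']

        @classmethod
        def setup_clients(cls):
            super(MyPluginBaseTest, cls).setup_clients()
            # cls.os_primary is a tempest.clients.Manager built for the
            # allocated primary credentials; widgets_client is hypothetical.
            cls.widgets_client = cls.os_primary.widgets_client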
If there is an interface from tempest that you need to rely on in your plugin
which is not listed above, it likely needs to be migrated to tempest.lib. In
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index d06e4ba..b57e98f 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -29,19 +29,19 @@
In the proposal etherpad we'll be looking for answers to 3 questions
- #. The tests proposed for removal must have equiv. coverage in a different
- project's test suite (whether this is another gating test project, or an in
- tree functional test suite). For API tests preferably the other project will
- have a similar source of friction in place to prevent breaking api changes
- so that we don't regress and let breaking api changes slip through the
- gate.
- #. The test proposed for removal has a failure rate < 0.50% in the gate over
- the past release (the value and interval will likely be adjusted in the
- future)
+#. The tests proposed for removal must have equivalent coverage in a different
+ project's test suite (whether this is another gating test project, or an
+ in-tree functional test suite). For API tests, preferably the other project will
+ have a similar source of friction in place to prevent breaking API changes,
+ so that we don't regress and let breaking API changes slip through the
+ gate.
+#. The test proposed for removal has a failure rate < 0.50% in the gate over
+ the past release (the value and interval will likely be adjusted in the
+ future)
- .. _`prong #3`:
- #. There must not be an external user/consumer of tempest
- that depends on the test proposed for removal
+ .. _`prong #3`:
+#. There must not be an external user/consumer of tempest
+ that depends on the test proposed for removal
The answers to 1 and 2 are easy to verify. For 1 just provide a link to the new
test location. If you are linking to the tempest removal patch please also put
@@ -68,23 +68,23 @@
You can access the infra mysql subunit2sql db w/ read-only permissions with:
- * hostname: logstash.openstack.org
- * username: query
- * password: query
- * db_name: subunit2sql
+* hostname: logstash.openstack.org
+* username: query
+* password: query
+* db_name: subunit2sql
For example if you were trying to remove the test with the id:
tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON.test_get_flavor_details_for_deleted_flavor
you would run the following:
- #. run: "mysql -u query -p -h logstash.openstack.org subunit2sql" to connect
- to the subunit2sql db
- #. run the query: MySQL [subunit2sql]> select * from tests where test_id like
- "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
- which will return a table of all the tests in the class (but it will also
- catch failures in setUpClass and tearDownClass)
- #. paste the output table with numbers and the mysql command you ran to
- generate it into the etherpad.
+#. run: "mysql -u query -p -h logstash.openstack.org subunit2sql" to connect
+ to the subunit2sql db
+#. run the query: MySQL [subunit2sql]> select * from tests where test_id like
+ "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
+ which will return a table of all the tests in the class (but it will also
+ catch failures in setUpClass and tearDownClass)
+#. paste the output table with numbers and the mysql command you ran to
+ generate it into the etherpad.
Eventually a cli interface will be created to make that a bit more friendly.
Also a dashboard is in the works so we don't need to manually run the command.
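As a rough illustration of the failure-rate criterion from question 2 above, the per-status
counts returned by that query can be turned into a percentage with a few lines (the numbers
are made up; this is a sketch, not an official tool)::

    # Hypothetical counts for a single test_id taken from the query output.
    success = 4132
    fail = 7

    failure_rate = 100.0 * fail / (success + fail)
    print('failure rate: %.2f%%' % failure_rate)
    # The removal criterion asks for a rate below 0.50% over the past release.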
@@ -131,23 +131,23 @@
For the most part all tempest test removals have to go through this procedure
there are a couple of exceptions though:
- #. The class of testing has been decided to be outside the scope of tempest.
- #. A revert for a patch which added a broken test, or testing which didn't
- actually run in the gate (basically any revert for something which
- shouldn't have been added)
- #. Tests that would become out of scope as a consequence of an API change,
- as described in `API Compatibility`_.
- Such tests cannot live in Tempest because of the branchless nature of
- Tempest. Such test must still honor `prong #3`_.
+#. The class of testing has been decided to be outside the scope of tempest.
+#. A revert for a patch which added a broken test, or testing which didn't
+ actually run in the gate (basically any revert for something which
+ shouldn't have been added)
+#. Tests that would become out of scope as a consequence of an API change,
+ as described in `API Compatibility`_.
+ Such tests cannot live in Tempest because of the branchless nature of
+ Tempest. Such tests must still honor `prong #3`_.
For the first exception type the only types of testing in tree which have been
declared out of scope at this point are:
- * The CLI tests (which should be completely removed at this point)
- * Neutron Adv. Services testing (which should be completely removed at this
- point)
- * XML API Tests (which should be completely removed at this point)
- * EC2 API/boto tests (which should be completely removed at this point)
+* The CLI tests (which should be completely removed at this point)
+* Neutron Adv. Services testing (which should be completely removed at this
+ point)
+* XML API Tests (which should be completely removed at this point)
+* EC2 API/boto tests (which should be completely removed at this point)
For tests that fit into this category the only criteria for removal is that
there is equivalent testing elsewhere.
@@ -159,19 +159,19 @@
are defined as in scope for direct testing in tempest. As of today that list
is:
- * Keystone
- * Nova
- * Glance
- * Cinder
- * Neutron
- * Swift
+* Keystone
+* Nova
+* Glance
+* Cinder
+* Neutron
+* Swift
anything that lives in tempest which doesn't test one of these projects can be
removed assuming there is equivalent testing elsewhere. Preferably using the
`tempest plugin mechanism`_
to maintain continuity after migrating the tests out of tempest.
-.. _tempest plugin mechanism: http://docs.openstack.org/developer/tempest/plugin.html
+.. _tempest plugin mechanism: https://docs.openstack.org/tempest/latest/plugin.html
API Compatibility
"""""""""""""""""
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 4e3bfa2..49af95a 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -6,13 +6,13 @@
This guide serves as a starting point for developers working on writing new
Tempest tests. At a high level tests in Tempest are just tests that conform to
the standard python `unit test`_ framework. But there are several aspects of
-that are unique to tempest and it's role as an integration test suite running
+testing that are unique to Tempest and its role as an integration test suite running
against a real cloud.
.. _unit test: https://docs.python.org/3.6/library/unittest.html
-.. note:: This guide is for writing tests in the tempest repository. While many
- parts of this guide are also applicable to tempest plugins, not all
+.. note:: This guide is for writing tests in the Tempest repository. While many
+ parts of this guide are also applicable to Tempest plugins, not all
the APIs mentioned are considered stable or recommended for use in
plugins. Please refer to :ref:`tempest_plugin` for details about
writing plugins
@@ -24,8 +24,8 @@
The base unit of testing in Tempest is the `TestCase`_ (also called the test
class). Each TestCase contains test methods which are the individual tests that
will be executed by the test runner. But, the TestCase is the smallest self
-contained unit for tests from the tempest perspective. It's also the level at
-which tempest is parallel safe. In other words, multiple TestCases can be
+contained unit for tests from the Tempest perspective. It's also the level at
+which Tempest is parallel safe. In other words, multiple TestCases can be
executed in parallel, but individual test methods in the same TestCase can not.
Also, all test methods within a TestCase are assumed to be executed serially. As
such you can use the test case to store variables that are shared between
@@ -36,12 +36,12 @@
In standard unittest the lifecycle of a TestCase can be described in the
following phases:
- #. setUpClass
- #. setUp
- #. Test Execution
- #. tearDown
- #. doCleanups
- #. tearDownClass
+#. setUpClass
+#. setUp
+#. Test Execution
+#. tearDown
+#. doCleanups
+#. tearDownClass
setUpClass
----------
@@ -54,15 +54,21 @@
To accomplish this you do **not** define a setUpClass function, instead there
are a number of predefined phases to setUpClass that are used. The phases are:
- * skip_checks
- * setup_credentials
- * setup_clients
- * resource_setup
+* skip_checks
+* setup_credentials
+* setup_clients
+* resource_setup
-which is executed in that order. An example of a TestCase which defines all
+which are executed in that order. Cleanup of resources provisioned during
+resource_setup must be scheduled right after provisioning, using
+the addClassResourceCleanup helper. The resource cleanups stacked this way
+are executed in reverse order during tearDownClass, before the cleanup of
+test credentials takes place. An example of a TestCase which defines all
of these would be::
+ from tempest.common import waiters
from tempest import config
+ from tempest.lib.common.utils import test_utils
from tempest import test
CONF = config.CONF
@@ -111,6 +117,13 @@
"""
super(TestExampleCase, cls).resource_setup()
cls.shared_server = cls.servers_client.create_server(...)
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client,
+ cls.shared_server['id'])
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.servers_client.delete_server,
+ cls.shared_server['id'])
.. _credentials:
@@ -151,7 +164,7 @@
You can also specify credentials with specific roles assigned. This is useful
for cases where there are specific RBAC requirements hard coded into an API.
The canonical example of this are swift tests which often want to test swift's
-concepts of operator and reseller_admin. An actual example from tempest on how
+concepts of operator and reseller_admin. An actual example from Tempest on how
to do this is::
class PublicObjectTest(base.BaseObjectTest):
@@ -187,7 +200,7 @@
| [$label, $role] | cls.os_roles_$label |
+-------------------+---------------------+
-By default cls.os_primary is available since it is allocated in the base tempest test
+By default cls.os_primary is available since it is allocated in the base Tempest test
class (located in tempest/test.py). If your TestCase inherits from a different
direct parent class (it'll still inherit from the BaseTestCase, just not
directly) be sure to check if that class overrides allocated credentials.
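A hedged sketch of the role-based credentials mechanism described above (the role labels and
the use of the object storage config options are illustrative)::

    from tempest.api.object_storage import base
    from tempest import config

    CONF = config.CONF


    class PublicObjectTest(base.BaseObjectTest):

        credentials = [['operator', CONF.object_storage.operator_role],
                       ['reseller', CONF.object_storage.reseller_admin_role]]

        @classmethod
        def setup_clients(cls):
            super(PublicObjectTest, cls).setup_clients()
            # Clients built from the role-annotated credentials are exposed
            # as cls.os_roles_<label>, per the table above.
            cls.operator_client = cls.os_roles_operator.object_client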
@@ -195,8 +208,8 @@
Dealing with Network Allocation
'''''''''''''''''''''''''''''''
-When neutron is enabled and a testing requires networking this isn't normally
-automatically setup when a tenant is created. Since tempest needs isolated
+When Neutron is enabled and a test requires networking, this isn't normally
+set up automatically when a tenant is created. Since Tempest needs isolated
tenants to function properly it also needs to handle network allocation. By
default the base test class will allocate a network, subnet, and router
automatically (this depends on the configured credential provider, for more
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
new file mode 100644
index 0000000..a684984
--- /dev/null
+++ b/playbooks/devstack-tempest.yaml
@@ -0,0 +1,14 @@
+# Changes that run through devstack-tempest are likely to have an impact on
+# the devstack part of the job, so we keep devstack in the main play to
+# avoid zuul retrying on legitimate failures.
+- hosts: all
+ roles:
+ - run-devstack
+
+# We run tests only on one node, regardless of how many nodes are in the system
+- hosts: tempest
+ roles:
+ - setup-tempest-run-dir
+ - setup-tempest-data-dir
+ - acl-devstack-files
+ - run-tempest
diff --git a/playbooks/post-tempest.yaml b/playbooks/post-tempest.yaml
new file mode 100644
index 0000000..820e4f6
--- /dev/null
+++ b/playbooks/post-tempest.yaml
@@ -0,0 +1,24 @@
+- hosts: all
+ become: true
+ vars:
+ logs_root: "{{ devstack_base_dir|default('/opt/stack') }}"
+ stage_dir: "{{ devstack_base_dir|default('/opt/stack') }}"
+ test_results_stage_name: 'test_results'
+ roles:
+ - role: process-test-results
+ test_results_dir: '{{ logs_root }}/tempest'
+ tox_envdir: tempest
+ - role: process-stackviz
+ - role: stage-output
+ zuul_copy_output:
+ { '{{ logs_root }}/tempest/etc/tempest.conf': 'logs',
+ '{{ logs_root }}/tempest/etc/accounts.yaml': 'logs',
+ '{{ logs_root }}/tempest/tempest.log': 'logs',
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': 'logs',
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': 'logs',
+ '{{ stage_dir }}/stackviz': 'logs' }
+ extensions_to_txt:
+ - conf
+ - log
+ - yaml
+ - yml
diff --git a/releasenotes/notes/10.0-supported-openstack-releases-b88db468695348f6.yaml b/releasenotes/notes/10/10.0-supported-openstack-releases-b88db468695348f6.yaml
similarity index 100%
rename from releasenotes/notes/10.0-supported-openstack-releases-b88db468695348f6.yaml
rename to releasenotes/notes/10/10.0-supported-openstack-releases-b88db468695348f6.yaml
diff --git a/releasenotes/notes/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml b/releasenotes/notes/10/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml
similarity index 75%
rename from releasenotes/notes/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml
rename to releasenotes/notes/10/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml
index 0ed3130..c1edd63 100644
--- a/releasenotes/notes/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml
+++ b/releasenotes/notes/10/10.0.0-Tempest-library-interface-0eb680b810139a50.yaml
@@ -5,7 +5,7 @@
it lives directly in the tempest project. For more information refer to
the `library docs`_.
- .. _library docs: http://docs.openstack.org/developer/tempest/library.html#library
+ .. _library docs: https://docs.openstack.org/tempest/latest/library.html#current-library-apis
features:
- Tempest library interface
diff --git a/releasenotes/notes/10.0.0-start-using-reno-ed9518126fd0e1a3.yaml b/releasenotes/notes/10/10.0.0-start-using-reno-ed9518126fd0e1a3.yaml
similarity index 100%
rename from releasenotes/notes/10.0.0-start-using-reno-ed9518126fd0e1a3.yaml
rename to releasenotes/notes/10/10.0.0-start-using-reno-ed9518126fd0e1a3.yaml
diff --git a/releasenotes/notes/11.0.0-api-microversion-testing-support-2ceddd2255670932.yaml b/releasenotes/notes/11/11.0.0-api-microversion-testing-support-2ceddd2255670932.yaml
similarity index 100%
rename from releasenotes/notes/11.0.0-api-microversion-testing-support-2ceddd2255670932.yaml
rename to releasenotes/notes/11/11.0.0-api-microversion-testing-support-2ceddd2255670932.yaml
diff --git a/releasenotes/notes/11.0.0-compute-microversion-support-e0b23f960f894b9b.yaml b/releasenotes/notes/11/11.0.0-compute-microversion-support-e0b23f960f894b9b.yaml
similarity index 100%
rename from releasenotes/notes/11.0.0-compute-microversion-support-e0b23f960f894b9b.yaml
rename to releasenotes/notes/11/11.0.0-compute-microversion-support-e0b23f960f894b9b.yaml
diff --git a/releasenotes/notes/11.0.0-supported-openstack-releases-1e5d7295d939d439.yaml b/releasenotes/notes/11/11.0.0-supported-openstack-releases-1e5d7295d939d439.yaml
similarity index 100%
rename from releasenotes/notes/11.0.0-supported-openstack-releases-1e5d7295d939d439.yaml
rename to releasenotes/notes/11/11.0.0-supported-openstack-releases-1e5d7295d939d439.yaml
diff --git a/releasenotes/notes/12.0.0-supported-openstack-releases-f10aac381d933dd1.yaml b/releasenotes/notes/12/12.0.0-supported-openstack-releases-f10aac381d933dd1.yaml
similarity index 100%
rename from releasenotes/notes/12.0.0-supported-openstack-releases-f10aac381d933dd1.yaml
rename to releasenotes/notes/12/12.0.0-supported-openstack-releases-f10aac381d933dd1.yaml
diff --git a/releasenotes/notes/12.1.0-add-network-versions-client-d90e8334e1443f5c.yaml b/releasenotes/notes/12/12.1.0-add-network-versions-client-d90e8334e1443f5c.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-add-network-versions-client-d90e8334e1443f5c.yaml
rename to releasenotes/notes/12/12.1.0-add-network-versions-client-d90e8334e1443f5c.yaml
diff --git a/releasenotes/notes/12.1.0-add-scope-to-auth-b5a82493ea89f41e.yaml b/releasenotes/notes/12/12.1.0-add-scope-to-auth-b5a82493ea89f41e.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-add-scope-to-auth-b5a82493ea89f41e.yaml
rename to releasenotes/notes/12/12.1.0-add-scope-to-auth-b5a82493ea89f41e.yaml
diff --git a/releasenotes/notes/12.1.0-add-tempest-run-3d0aaf69c2ca4115.yaml b/releasenotes/notes/12/12.1.0-add-tempest-run-3d0aaf69c2ca4115.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-add-tempest-run-3d0aaf69c2ca4115.yaml
rename to releasenotes/notes/12/12.1.0-add-tempest-run-3d0aaf69c2ca4115.yaml
diff --git a/releasenotes/notes/12.1.0-add-tempest-workspaces-228a2ba4690b5589.yaml b/releasenotes/notes/12/12.1.0-add-tempest-workspaces-228a2ba4690b5589.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-add-tempest-workspaces-228a2ba4690b5589.yaml
rename to releasenotes/notes/12/12.1.0-add-tempest-workspaces-228a2ba4690b5589.yaml
diff --git a/releasenotes/notes/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml b/releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
rename to releasenotes/notes/12/12.1.0-add_subunit_describe_calls-5498a37e6cd66c4b.yaml
diff --git a/releasenotes/notes/12.1.0-bug-1486834-7ebca15836ae27a9.yaml b/releasenotes/notes/12/12.1.0-bug-1486834-7ebca15836ae27a9.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-bug-1486834-7ebca15836ae27a9.yaml
rename to releasenotes/notes/12/12.1.0-bug-1486834-7ebca15836ae27a9.yaml
diff --git a/releasenotes/notes/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml b/releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
rename to releasenotes/notes/12/12.1.0-identity-clients-as-library-e663c6132fcac6c2.yaml
diff --git a/releasenotes/notes/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml b/releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
rename to releasenotes/notes/12/12.1.0-image-clients-as-library-86d17caa26ce3961.yaml
diff --git a/releasenotes/notes/12.1.0-new-test-utils-module-adf34468c4d52719.yaml b/releasenotes/notes/12/12.1.0-new-test-utils-module-adf34468c4d52719.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-new-test-utils-module-adf34468c4d52719.yaml
rename to releasenotes/notes/12/12.1.0-new-test-utils-module-adf34468c4d52719.yaml
diff --git a/releasenotes/notes/12.1.0-remove-input-scenarios-functionality-01308e6d4307f580.yaml b/releasenotes/notes/12/12.1.0-remove-input-scenarios-functionality-01308e6d4307f580.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-remove-input-scenarios-functionality-01308e6d4307f580.yaml
rename to releasenotes/notes/12/12.1.0-remove-input-scenarios-functionality-01308e6d4307f580.yaml
diff --git a/releasenotes/notes/12.1.0-remove-integrated-horizon-bb57551c1e5f5be3.yaml b/releasenotes/notes/12/12.1.0-remove-integrated-horizon-bb57551c1e5f5be3.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-remove-integrated-horizon-bb57551c1e5f5be3.yaml
rename to releasenotes/notes/12/12.1.0-remove-integrated-horizon-bb57551c1e5f5be3.yaml
diff --git a/releasenotes/notes/12.1.0-remove-legacy-credential-providers-3d653ac3ba1ada2b.yaml b/releasenotes/notes/12/12.1.0-remove-legacy-credential-providers-3d653ac3ba1ada2b.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-remove-legacy-credential-providers-3d653ac3ba1ada2b.yaml
rename to releasenotes/notes/12/12.1.0-remove-legacy-credential-providers-3d653ac3ba1ada2b.yaml
diff --git a/releasenotes/notes/12.1.0-remove-trove-tests-666522e9113549f9.yaml b/releasenotes/notes/12/12.1.0-remove-trove-tests-666522e9113549f9.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-remove-trove-tests-666522e9113549f9.yaml
rename to releasenotes/notes/12/12.1.0-remove-trove-tests-666522e9113549f9.yaml
diff --git a/releasenotes/notes/12.1.0-routers-client-as-library-25a363379da351f6.yaml b/releasenotes/notes/12/12.1.0-routers-client-as-library-25a363379da351f6.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-routers-client-as-library-25a363379da351f6.yaml
rename to releasenotes/notes/12/12.1.0-routers-client-as-library-25a363379da351f6.yaml
diff --git a/releasenotes/notes/12.1.0-support-chunked-encoding-d71f53225f68edf3.yaml b/releasenotes/notes/12/12.1.0-support-chunked-encoding-d71f53225f68edf3.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-support-chunked-encoding-d71f53225f68edf3.yaml
rename to releasenotes/notes/12/12.1.0-support-chunked-encoding-d71f53225f68edf3.yaml
diff --git a/releasenotes/notes/12.1.0-tempest-init-global-config-dir-location-changes-12260255871d3a2b.yaml b/releasenotes/notes/12/12.1.0-tempest-init-global-config-dir-location-changes-12260255871d3a2b.yaml
similarity index 100%
rename from releasenotes/notes/12.1.0-tempest-init-global-config-dir-location-changes-12260255871d3a2b.yaml
rename to releasenotes/notes/12/12.1.0-tempest-init-global-config-dir-location-changes-12260255871d3a2b.yaml
diff --git a/releasenotes/notes/12.2.0-add-httptimeout-in-restclient-ax78061900e3f3d7.yaml b/releasenotes/notes/12/12.2.0-add-httptimeout-in-restclient-ax78061900e3f3d7.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-add-httptimeout-in-restclient-ax78061900e3f3d7.yaml
rename to releasenotes/notes/12/12.2.0-add-httptimeout-in-restclient-ax78061900e3f3d7.yaml
diff --git a/releasenotes/notes/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml b/releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
rename to releasenotes/notes/12/12.2.0-add-new-identity-clients-3c3afd674a395bde.yaml
diff --git a/releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml b/releasenotes/notes/12/12.2.0-clients_module-16f3025f515bf9ec.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-clients_module-16f3025f515bf9ec.yaml
rename to releasenotes/notes/12/12.2.0-clients_module-16f3025f515bf9ec.yaml
diff --git a/releasenotes/notes/12.2.0-nova_cert_default-90eb7c1e3cde624a.yaml b/releasenotes/notes/12/12.2.0-nova_cert_default-90eb7c1e3cde624a.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-nova_cert_default-90eb7c1e3cde624a.yaml
rename to releasenotes/notes/12/12.2.0-nova_cert_default-90eb7c1e3cde624a.yaml
diff --git a/releasenotes/notes/12.2.0-plugin-service-client-registration-00b19a2dd4935ba0.yaml b/releasenotes/notes/12/12.2.0-plugin-service-client-registration-00b19a2dd4935ba0.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-plugin-service-client-registration-00b19a2dd4935ba0.yaml
rename to releasenotes/notes/12/12.2.0-plugin-service-client-registration-00b19a2dd4935ba0.yaml
diff --git a/releasenotes/notes/12.2.0-remove-javelin-276f62d04f7e4a1d.yaml b/releasenotes/notes/12/12.2.0-remove-javelin-276f62d04f7e4a1d.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-remove-javelin-276f62d04f7e4a1d.yaml
rename to releasenotes/notes/12/12.2.0-remove-javelin-276f62d04f7e4a1d.yaml
diff --git a/releasenotes/notes/12.2.0-service_client_config-8a1d7b4de769c633.yaml b/releasenotes/notes/12/12.2.0-service_client_config-8a1d7b4de769c633.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-service_client_config-8a1d7b4de769c633.yaml
rename to releasenotes/notes/12/12.2.0-service_client_config-8a1d7b4de769c633.yaml
diff --git a/releasenotes/notes/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml b/releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
similarity index 100%
rename from releasenotes/notes/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
rename to releasenotes/notes/12/12.2.0-volume-clients-as-library-9a3444dd63c134b3.yaml
diff --git a/releasenotes/notes/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml b/releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
rename to releasenotes/notes/13/13.0.0-add-new-identity-clients-as-library-5f7ndha733nwdsn9.yaml
diff --git a/releasenotes/notes/13.0.0-add-volume-clients-as-a-library-d05b6bc35e66c6ef.yaml b/releasenotes/notes/13/13.0.0-add-volume-clients-as-a-library-d05b6bc35e66c6ef.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-add-volume-clients-as-a-library-d05b6bc35e66c6ef.yaml
rename to releasenotes/notes/13/13.0.0-add-volume-clients-as-a-library-d05b6bc35e66c6ef.yaml
diff --git a/releasenotes/notes/13.0.0-deprecate-get_ipv6_addr_by_EUI64-4673f07677289cf6.yaml b/releasenotes/notes/13/13.0.0-deprecate-get_ipv6_addr_by_EUI64-4673f07677289cf6.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-deprecate-get_ipv6_addr_by_EUI64-4673f07677289cf6.yaml
rename to releasenotes/notes/13/13.0.0-deprecate-get_ipv6_addr_by_EUI64-4673f07677289cf6.yaml
diff --git a/releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml b/releasenotes/notes/13/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
rename to releasenotes/notes/13/13.0.0-move-call-until-true-to-tempest-lib-c9ea70dd6fe9bd15.yaml
diff --git a/releasenotes/notes/13.0.0-start-of-newton-support-3ebb274f300f28eb.yaml b/releasenotes/notes/13/13.0.0-start-of-newton-support-3ebb274f300f28eb.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-start-of-newton-support-3ebb274f300f28eb.yaml
rename to releasenotes/notes/13/13.0.0-start-of-newton-support-3ebb274f300f28eb.yaml
diff --git a/releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml b/releasenotes/notes/13/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
rename to releasenotes/notes/13/13.0.0-tempest-cleanup-nostandalone-39df2aafb2545d35.yaml
diff --git a/releasenotes/notes/13.0.0-volume-clients-as-library-660811011be29d1a.yaml b/releasenotes/notes/13/13.0.0-volume-clients-as-library-660811011be29d1a.yaml
similarity index 100%
rename from releasenotes/notes/13.0.0-volume-clients-as-library-660811011be29d1a.yaml
rename to releasenotes/notes/13/13.0.0-volume-clients-as-library-660811011be29d1a.yaml
diff --git a/releasenotes/notes/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml b/releasenotes/notes/14/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
rename to releasenotes/notes/14/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
diff --git a/releasenotes/notes/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml b/releasenotes/notes/14/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
rename to releasenotes/notes/14/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
diff --git a/releasenotes/notes/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml b/releasenotes/notes/14/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
rename to releasenotes/notes/14/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
diff --git a/releasenotes/notes/14.0.0-add-image-clients-af94564fb34ddca6.yaml b/releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-image-clients-af94564fb34ddca6.yaml
rename to releasenotes/notes/14/14.0.0-add-image-clients-af94564fb34ddca6.yaml
diff --git a/releasenotes/notes/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/14/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/14/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml b/releasenotes/notes/14/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml
rename to releasenotes/notes/14/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml
diff --git a/releasenotes/notes/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml b/releasenotes/notes/14/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
rename to releasenotes/notes/14/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
diff --git a/releasenotes/notes/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml b/releasenotes/notes/14/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml
rename to releasenotes/notes/14/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml
diff --git a/releasenotes/notes/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml b/releasenotes/notes/14/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
rename to releasenotes/notes/14/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
diff --git a/releasenotes/notes/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml b/releasenotes/notes/14/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml
rename to releasenotes/notes/14/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml
diff --git a/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml b/releasenotes/notes/14/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
rename to releasenotes/notes/14/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
diff --git a/releasenotes/notes/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml b/releasenotes/notes/14/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml
rename to releasenotes/notes/14/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml
diff --git a/releasenotes/notes/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml b/releasenotes/notes/14/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml
rename to releasenotes/notes/14/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml
diff --git a/releasenotes/notes/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml b/releasenotes/notes/14/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml
rename to releasenotes/notes/14/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml
diff --git a/releasenotes/notes/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml b/releasenotes/notes/14/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml
rename to releasenotes/notes/14/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml
diff --git a/releasenotes/notes/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml b/releasenotes/notes/14/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml
similarity index 100%
rename from releasenotes/notes/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml
rename to releasenotes/notes/14/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml
diff --git a/releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/15/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/15/15.0.0-add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
rename to releasenotes/notes/15/15.0.0-add-image-clients-tests-49dbc0a0a4281a77.yaml
diff --git a/releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml b/releasenotes/notes/15/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
rename to releasenotes/notes/15/15.0.0-add-implied-roles-to-roles-client-library-edf96408ad9ba82e.yaml
diff --git a/releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml b/releasenotes/notes/15/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
rename to releasenotes/notes/15/15.0.0-add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
diff --git a/releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml b/releasenotes/notes/15/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
rename to releasenotes/notes/15/15.0.0-deprecate-allow_port_security_disabled-option-2d3d87f6bd11d03a.yaml
diff --git a/releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml b/releasenotes/notes/15/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
rename to releasenotes/notes/15/15.0.0-deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
diff --git a/releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml b/releasenotes/notes/15/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
rename to releasenotes/notes/15/15.0.0-deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
diff --git a/releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml b/releasenotes/notes/15/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
rename to releasenotes/notes/15/15.0.0-jsonschema-validator-2377ba131e12d3c7.yaml
diff --git a/releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml b/releasenotes/notes/15/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
rename to releasenotes/notes/15/15.0.0-remove-deprecated-compute-microversion-config-options-eaee6a7d2f8390a8.yaml
diff --git a/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml b/releasenotes/notes/15/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
rename to releasenotes/notes/15/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
diff --git a/releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml b/releasenotes/notes/15/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
rename to releasenotes/notes/15/15.0.0-remove-deprecated-input-scenario-config-options-414e0c5442e967e9.yaml
diff --git a/releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml b/releasenotes/notes/15/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
similarity index 100%
rename from releasenotes/notes/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
rename to releasenotes/notes/15/15.0.0-remove-deprecated-network-config-options-f9ce276231578fe6.yaml
diff --git a/releasenotes/notes/16.0.0-add-OAUTH-Consumer-Client-tempest-tests-db1df7aae4a9fd4e.yaml b/releasenotes/notes/16/16.0.0-add-OAUTH-Consumer-Client-tempest-tests-db1df7aae4a9fd4e.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-OAUTH-Consumer-Client-tempest-tests-db1df7aae4a9fd4e.yaml
rename to releasenotes/notes/16/16.0.0-add-OAUTH-Consumer-Client-tempest-tests-db1df7aae4a9fd4e.yaml
diff --git a/releasenotes/notes/16.0.0-add-additional-methods-to-roles-client-library-178d4a6000dec72d.yaml b/releasenotes/notes/16/16.0.0-add-additional-methods-to-roles-client-library-178d4a6000dec72d.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-additional-methods-to-roles-client-library-178d4a6000dec72d.yaml
rename to releasenotes/notes/16/16.0.0-add-additional-methods-to-roles-client-library-178d4a6000dec72d.yaml
diff --git a/releasenotes/notes/16.0.0-add-cascade-parameter-to-volumes-client-ff4f7f12795003a4.yaml b/releasenotes/notes/16/16.0.0-add-cascade-parameter-to-volumes-client-ff4f7f12795003a4.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-cascade-parameter-to-volumes-client-ff4f7f12795003a4.yaml
rename to releasenotes/notes/16/16.0.0-add-cascade-parameter-to-volumes-client-ff4f7f12795003a4.yaml
diff --git a/releasenotes/notes/16.0.0-add-compute-server-evaculate-client-as-a-library-ed76baf25f02c3ca.yaml b/releasenotes/notes/16/16.0.0-add-compute-server-evaculate-client-as-a-library-ed76baf25f02c3ca.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-compute-server-evaculate-client-as-a-library-ed76baf25f02c3ca.yaml
rename to releasenotes/notes/16/16.0.0-add-compute-server-evaculate-client-as-a-library-ed76baf25f02c3ca.yaml
diff --git a/releasenotes/notes/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml b/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
rename to releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
diff --git a/releasenotes/notes/16.0.0-add-list-auth-project-client-5905076d914a3943.yaml b/releasenotes/notes/16/16.0.0-add-list-auth-project-client-5905076d914a3943.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-list-auth-project-client-5905076d914a3943.yaml
rename to releasenotes/notes/16/16.0.0-add-list-auth-project-client-5905076d914a3943.yaml
diff --git a/releasenotes/notes/16.0.0-add-list-glance-api-versions-ec5fc8081fc8a0ae.yaml b/releasenotes/notes/16/16.0.0-add-list-glance-api-versions-ec5fc8081fc8a0ae.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-list-glance-api-versions-ec5fc8081fc8a0ae.yaml
rename to releasenotes/notes/16/16.0.0-add-list-glance-api-versions-ec5fc8081fc8a0ae.yaml
diff --git a/releasenotes/notes/16.0.0-add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml b/releasenotes/notes/16/16.0.0-add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
rename to releasenotes/notes/16/16.0.0-add-list-security-groups-by-servers-to-servers-client-library-088df48f6d81f4be.yaml
diff --git a/releasenotes/notes/16.0.0-add-list-version-to-identity-client-944cb7396088a575.yaml b/releasenotes/notes/16/16.0.0-add-list-version-to-identity-client-944cb7396088a575.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-list-version-to-identity-client-944cb7396088a575.yaml
rename to releasenotes/notes/16/16.0.0-add-list-version-to-identity-client-944cb7396088a575.yaml
diff --git a/releasenotes/notes/16.0.0-add-list-version-to-volume-client-4769dd1bd4ab9c5e.yaml b/releasenotes/notes/16/16.0.0-add-list-version-to-volume-client-4769dd1bd4ab9c5e.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-list-version-to-volume-client-4769dd1bd4ab9c5e.yaml
rename to releasenotes/notes/16/16.0.0-add-list-version-to-volume-client-4769dd1bd4ab9c5e.yaml
diff --git a/releasenotes/notes/16.0.0-add-quota-sets-detail-kwarg-74b72183295b3ce7.yaml b/releasenotes/notes/16/16.0.0-add-quota-sets-detail-kwarg-74b72183295b3ce7.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-quota-sets-detail-kwarg-74b72183295b3ce7.yaml
rename to releasenotes/notes/16/16.0.0-add-quota-sets-detail-kwarg-74b72183295b3ce7.yaml
diff --git a/releasenotes/notes/16.0.0-add-tempest-lib-remote-client-adbeb3f42a36910b.yaml b/releasenotes/notes/16/16.0.0-add-tempest-lib-remote-client-adbeb3f42a36910b.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-tempest-lib-remote-client-adbeb3f42a36910b.yaml
rename to releasenotes/notes/16/16.0.0-add-tempest-lib-remote-client-adbeb3f42a36910b.yaml
diff --git a/releasenotes/notes/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml b/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
rename to releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
diff --git a/releasenotes/notes/16.0.0-add-update-encryption-type-to-encryption-types-client-f3093532a0bcf9a1.yaml b/releasenotes/notes/16/16.0.0-add-update-encryption-type-to-encryption-types-client-f3093532a0bcf9a1.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-update-encryption-type-to-encryption-types-client-f3093532a0bcf9a1.yaml
rename to releasenotes/notes/16/16.0.0-add-update-encryption-type-to-encryption-types-client-f3093532a0bcf9a1.yaml
diff --git a/releasenotes/notes/16.0.0-add-volume-manage-client-as-library-78ab198a1dc1bd41.yaml b/releasenotes/notes/16/16.0.0-add-volume-manage-client-as-library-78ab198a1dc1bd41.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-add-volume-manage-client-as-library-78ab198a1dc1bd41.yaml
rename to releasenotes/notes/16/16.0.0-add-volume-manage-client-as-library-78ab198a1dc1bd41.yaml
diff --git a/releasenotes/notes/16.0.0-create-server-tags-client-8c0042a77e859af6.yaml b/releasenotes/notes/16/16.0.0-create-server-tags-client-8c0042a77e859af6.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-create-server-tags-client-8c0042a77e859af6.yaml
rename to releasenotes/notes/16/16.0.0-create-server-tags-client-8c0042a77e859af6.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-deactivate_image-config-7a282c471937bbcb.yaml b/releasenotes/notes/16/16.0.0-deprecate-deactivate_image-config-7a282c471937bbcb.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-deactivate_image-config-7a282c471937bbcb.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-deactivate_image-config-7a282c471937bbcb.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-dvr_extra_resources-config-8c319d6dab7f7e5c.yaml b/releasenotes/notes/16/16.0.0-deprecate-dvr_extra_resources-config-8c319d6dab7f7e5c.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-dvr_extra_resources-config-8c319d6dab7f7e5c.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-dvr_extra_resources-config-8c319d6dab7f7e5c.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-glance-api-version-config-options-8370b63aea8e14cf.yaml b/releasenotes/notes/16/16.0.0-deprecate-glance-api-version-config-options-8370b63aea8e14cf.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-glance-api-version-config-options-8370b63aea8e14cf.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-glance-api-version-config-options-8370b63aea8e14cf.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-resources-prefix-option-ad490c0a30a0266b.yaml b/releasenotes/notes/16/16.0.0-deprecate-resources-prefix-option-ad490c0a30a0266b.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-resources-prefix-option-ad490c0a30a0266b.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-resources-prefix-option-ad490c0a30a0266b.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-skip_unless_attr-decorator-450a1ed727494724.yaml b/releasenotes/notes/16/16.0.0-deprecate-skip_unless_attr-decorator-450a1ed727494724.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-skip_unless_attr-decorator-450a1ed727494724.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-skip_unless_attr-decorator-450a1ed727494724.yaml
diff --git a/releasenotes/notes/16.0.0-deprecate-skip_unless_config-decorator-64c32d588043ab12.yaml b/releasenotes/notes/16/16.0.0-deprecate-skip_unless_config-decorator-64c32d588043ab12.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecate-skip_unless_config-decorator-64c32d588043ab12.yaml
rename to releasenotes/notes/16/16.0.0-deprecate-skip_unless_config-decorator-64c32d588043ab12.yaml
diff --git a/releasenotes/notes/16.0.0-deprecated-cinder-api-v1-option-df7d5a54d93db5cf.yaml b/releasenotes/notes/16/16.0.0-deprecated-cinder-api-v1-option-df7d5a54d93db5cf.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-deprecated-cinder-api-v1-option-df7d5a54d93db5cf.yaml
rename to releasenotes/notes/16/16.0.0-deprecated-cinder-api-v1-option-df7d5a54d93db5cf.yaml
diff --git a/releasenotes/notes/16.0.0-dreprecate_client_parameters-cb8d069e62957f7e.yaml b/releasenotes/notes/16/16.0.0-dreprecate_client_parameters-cb8d069e62957f7e.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-dreprecate_client_parameters-cb8d069e62957f7e.yaml
rename to releasenotes/notes/16/16.0.0-dreprecate_client_parameters-cb8d069e62957f7e.yaml
diff --git a/releasenotes/notes/16.0.0-fix-volume-v2-service-clients-bugfix-1667354-73d2c3c8fedc08bf.yaml b/releasenotes/notes/16/16.0.0-fix-volume-v2-service-clients-bugfix-1667354-73d2c3c8fedc08bf.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-fix-volume-v2-service-clients-bugfix-1667354-73d2c3c8fedc08bf.yaml
rename to releasenotes/notes/16/16.0.0-fix-volume-v2-service-clients-bugfix-1667354-73d2c3c8fedc08bf.yaml
diff --git a/releasenotes/notes/16.0.0-mitaka-eol-88ff8355fff81b55.yaml b/releasenotes/notes/16/16.0.0-mitaka-eol-88ff8355fff81b55.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-mitaka-eol-88ff8355fff81b55.yaml
rename to releasenotes/notes/16/16.0.0-mitaka-eol-88ff8355fff81b55.yaml
diff --git a/releasenotes/notes/16.0.0-remove-call_until_true-of-test-de9c13bc8f969921.yaml b/releasenotes/notes/16/16.0.0-remove-call_until_true-of-test-de9c13bc8f969921.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-call_until_true-of-test-de9c13bc8f969921.yaml
rename to releasenotes/notes/16/16.0.0-remove-call_until_true-of-test-de9c13bc8f969921.yaml
diff --git a/releasenotes/notes/16.0.0-remove-cinder-v1-api-tests-71e266b8d55d475f.yaml b/releasenotes/notes/16/16.0.0-remove-cinder-v1-api-tests-71e266b8d55d475f.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-cinder-v1-api-tests-71e266b8d55d475f.yaml
rename to releasenotes/notes/16/16.0.0-remove-cinder-v1-api-tests-71e266b8d55d475f.yaml
diff --git a/releasenotes/notes/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
rename to releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
diff --git a/releasenotes/notes/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
rename to releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
diff --git a/releasenotes/notes/16.0.0-remove-deprecated-dvr_extra_resources-option-e8c441c38eab7ddd.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-dvr_extra_resources-option-e8c441c38eab7ddd.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-deprecated-dvr_extra_resources-option-e8c441c38eab7ddd.yaml
rename to releasenotes/notes/16/16.0.0-remove-deprecated-dvr_extra_resources-option-e8c441c38eab7ddd.yaml
diff --git a/releasenotes/notes/16.0.0-remove-deprecated-identity-reseller-option-4411c7e3951f1094.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-identity-reseller-option-4411c7e3951f1094.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-deprecated-identity-reseller-option-4411c7e3951f1094.yaml
rename to releasenotes/notes/16/16.0.0-remove-deprecated-identity-reseller-option-4411c7e3951f1094.yaml
diff --git a/releasenotes/notes/16.0.0-remove-sahara-service-available-44a642aa9c634ab4.yaml b/releasenotes/notes/16/16.0.0-remove-sahara-service-available-44a642aa9c634ab4.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-sahara-service-available-44a642aa9c634ab4.yaml
rename to releasenotes/notes/16/16.0.0-remove-sahara-service-available-44a642aa9c634ab4.yaml
diff --git a/releasenotes/notes/16.0.0-remove-volume_feature_enabled.volume_services-c6aa142cc1021297.yaml b/releasenotes/notes/16/16.0.0-remove-volume_feature_enabled.volume_services-c6aa142cc1021297.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-remove-volume_feature_enabled.volume_services-c6aa142cc1021297.yaml
rename to releasenotes/notes/16/16.0.0-remove-volume_feature_enabled.volume_services-c6aa142cc1021297.yaml
diff --git a/releasenotes/notes/16.0.0-use-keystone-v3-api-935860d30ddbb8e9.yaml b/releasenotes/notes/16/16.0.0-use-keystone-v3-api-935860d30ddbb8e9.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-use-keystone-v3-api-935860d30ddbb8e9.yaml
rename to releasenotes/notes/16/16.0.0-use-keystone-v3-api-935860d30ddbb8e9.yaml
diff --git a/releasenotes/notes/16.0.0-volume-transfers-client-e5ed3f5464c0cdc0.yaml b/releasenotes/notes/16/16.0.0-volume-transfers-client-e5ed3f5464c0cdc0.yaml
similarity index 100%
rename from releasenotes/notes/16.0.0-volume-transfers-client-e5ed3f5464c0cdc0.yaml
rename to releasenotes/notes/16/16.0.0-volume-transfers-client-e5ed3f5464c0cdc0.yaml
diff --git a/releasenotes/notes/add-create-group-from-src-tempest-tests-9eb8b0b4b5c52055.yaml b/releasenotes/notes/add-create-group-from-src-tempest-tests-9eb8b0b4b5c52055.yaml
new file mode 100644
index 0000000..dec4a27
--- /dev/null
+++ b/releasenotes/notes/add-create-group-from-src-tempest-tests-9eb8b0b4b5c52055.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add create_group_from_source to groups_client in the volume service library.
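A hedged usage sketch of the new call (the keyword arguments follow the Cinder generic group
"create from source" action and should be treated as illustrative)::

    # groups_client is assumed to be an authenticated volume v3 groups client.
    group = groups_client.create_group_from_source(
        source_group_id=source_group['id'],
        name='group-from-src')['group']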
diff --git a/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
new file mode 100644
index 0000000..87a6af9
--- /dev/null
+++ b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
@@ -0,0 +1,17 @@
+---
+fixes:
+ - |
+ Allow specifying new domain parameters:
+
+ * `user_domain_name`
+ * `user_domain_id`
+ * `project_domain_name`
+ * `project_domain_id`
+
+ for the CLIClient class, whose values will be passed as
+ ``--os-user-domain-name``, ``--os-user-domain-id``,
+ ``--os-project-domain-name`` and ``--os-project-domain-id`` respectively
+ during command execution.
+
+ This helps prevent possible test failures with authentication in
+ Keystone v3. Bug: #1719687
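A minimal sketch of how the new parameters might be used, assuming the
``tempest.lib.cli.base.CLIClient`` constructor (all values are placeholders)::

    from tempest.lib.cli import base as cli_base

    client = cli_base.CLIClient(
        username='demo', password='secret', tenant_name='demo',
        uri='http://keystone.example.com/v3', cli_dir='/usr/local/bin',
        user_domain_name='Default', project_domain_name='Default')

    # The domain values are forwarded as --os-user-domain-name and
    # --os-project-domain-name on every command the client executes.
    print(client.openstack('server list'))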
diff --git a/releasenotes/notes/add-ip-version-check-in-addresses-x491ac6d9abaxa12.yaml b/releasenotes/notes/add-ip-version-check-in-addresses-x491ac6d9abaxa12.yaml
new file mode 100644
index 0000000..957e903
--- /dev/null
+++ b/releasenotes/notes/add-ip-version-check-in-addresses-x491ac6d9abaxa12.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Add a more accurate IP version check in the addresses schema, which
+    limits the IP version value to [4, 6].
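A sketch of the kind of constraint this adds, expressed as a JSON schema fragment in Python
(the surrounding structure is abbreviated for illustration)::

    addr_schema = {
        'type': 'object',
        'properties': {
            # Only IPv4 or IPv6 are accepted after this fix.
            'version': {'type': 'integer', 'enum': [4, 6]},
            'addr': {'type': 'string'},
        },
    }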
diff --git a/releasenotes/notes/add-is-resource-deleted-sg-client-f4a7a7a54ff024d7.yaml b/releasenotes/notes/add-is-resource-deleted-sg-client-f4a7a7a54ff024d7.yaml
new file mode 100644
index 0000000..e046326
--- /dev/null
+++ b/releasenotes/notes/add-is-resource-deleted-sg-client-f4a7a7a54ff024d7.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Implement the `rest_client` method `is_resource_deleted` in the network
+ security group client.
diff --git a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
new file mode 100644
index 0000000..403bbad
--- /dev/null
+++ b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds a new CLI option to tempest run, --load-list <list-file>,
+ to specify target tests to run from a list-file. The list-file
+ supports the output format of the tempest run --list-tests
+ command.
diff --git a/releasenotes/notes/add-params-to-v2-list-backups-api-c088d2b4bfe90247.yaml b/releasenotes/notes/add-params-to-v2-list-backups-api-c088d2b4bfe90247.yaml
new file mode 100644
index 0000000..cee2d76
--- /dev/null
+++ b/releasenotes/notes/add-params-to-v2-list-backups-api-c088d2b4bfe90247.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The ``list_backups`` method of the v2 ``BackupsClient`` class now has
+ an additional ``**params`` argument that enables passing additional
+ information in the query string of the HTTP request.
diff --git a/releasenotes/notes/add-reset-group-snapshot-status-api-to-v3-group-snapshots-client-248d41827daf2a0c.yaml b/releasenotes/notes/add-reset-group-snapshot-status-api-to-v3-group-snapshots-client-248d41827daf2a0c.yaml
new file mode 100644
index 0000000..76b395d
--- /dev/null
+++ b/releasenotes/notes/add-reset-group-snapshot-status-api-to-v3-group-snapshots-client-248d41827daf2a0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the reset group snapshot status API to the v3 group_snapshots_client
+ library; the min_microversion of this API is 3.19. This feature enables
+ the possibility to reset group snapshot status.
diff --git a/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml b/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml
new file mode 100644
index 0000000..a39c23b
--- /dev/null
+++ b/releasenotes/notes/add-reset-group-status-api-to-v3-groups-client-9aa048617c66756a.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the reset group status API to the v3 groups_client library; the
+ min_microversion of this API is 3.20. This feature enables the possibility
+ to reset group status.
diff --git a/releasenotes/notes/add-return-value-to-retype-volume-a401aa619aaa2457.yaml b/releasenotes/notes/add-return-value-to-retype-volume-a401aa619aaa2457.yaml
new file mode 100644
index 0000000..ca42014
--- /dev/null
+++ b/releasenotes/notes/add-return-value-to-retype-volume-a401aa619aaa2457.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Add a missing return statement to the retype_volume API in the v2
+ volumes_client library: Bug#1703997
+
+ This changes the response body from None to an empty dictionary.
diff --git a/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
new file mode 100644
index 0000000..8fdf4f0
--- /dev/null
+++ b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add the --save-state option to allow saving the state of the cloud before a tempest run.
diff --git a/releasenotes/notes/add-show-snapshot-metadata-item-api-to-v2-snapshots-client-bd3cbab3c7f0e0b3.yaml b/releasenotes/notes/add-show-snapshot-metadata-item-api-to-v2-snapshots-client-bd3cbab3c7f0e0b3.yaml
new file mode 100644
index 0000000..140df60
--- /dev/null
+++ b/releasenotes/notes/add-show-snapshot-metadata-item-api-to-v2-snapshots-client-bd3cbab3c7f0e0b3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show snapshot metadata item API to v2 snapshots_client library.
+ This feature enables the possibility to show a snapshot's metadata for
+ a specific key.
diff --git a/releasenotes/notes/add-show-volume-image-metadata-api-to-v2-volumes-client-ee3c027f35276561.yaml b/releasenotes/notes/add-show-volume-image-metadata-api-to-v2-volumes-client-ee3c027f35276561.yaml
new file mode 100644
index 0000000..ac7c74e
--- /dev/null
+++ b/releasenotes/notes/add-show-volume-image-metadata-api-to-v2-volumes-client-ee3c027f35276561.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add show volume image metadata API to v2 volumes_client library.
+ This feature enables the possibility to show volume's image metadata.
diff --git a/releasenotes/notes/add-show-volume-metadata-item-api-to-v2-volumes-client-47d59ecd999ca9df.yaml b/releasenotes/notes/add-show-volume-metadata-item-api-to-v2-volumes-client-47d59ecd999ca9df.yaml
new file mode 100644
index 0000000..49a935c
--- /dev/null
+++ b/releasenotes/notes/add-show-volume-metadata-item-api-to-v2-volumes-client-47d59ecd999ca9df.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show volume metadata item API to v2 volumes_client library.
+ This feature enables the possibility to show a volume's metadata for
+ a specific key.
diff --git a/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml b/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml
new file mode 100644
index 0000000..e23abe3
--- /dev/null
+++ b/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add support for args and kwargs when calling func in call_until_true,
+ and log the elapsed time when call_until_true returns True or False,
+ for debugging.
diff --git a/releasenotes/notes/add-update-backup-api-to-v3-backups-client-e8465b2b66617dc0.yaml b/releasenotes/notes/add-update-backup-api-to-v3-backups-client-e8465b2b66617dc0.yaml
new file mode 100644
index 0000000..7cd6887
--- /dev/null
+++ b/releasenotes/notes/add-update-backup-api-to-v3-backups-client-e8465b2b66617dc0.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Define the v3 backups_client for the volume service as a library interface,
+ allowing other projects to use this module as a stable library without
+ maintenance changes.
+ Add the update backup API to the v3 backups_client library; the
+ min_microversion of this API is 3.9.
+
+ * backups_client(v3)
diff --git a/releasenotes/notes/add-update-group-tempest-tests-72f8ec19b2809849.yaml b/releasenotes/notes/add-update-group-tempest-tests-72f8ec19b2809849.yaml
new file mode 100644
index 0000000..23c30af
--- /dev/null
+++ b/releasenotes/notes/add-update-group-tempest-tests-72f8ec19b2809849.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add update_group to groups_client in the volume service library.
diff --git a/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
new file mode 100644
index 0000000..7814f4e
--- /dev/null
+++ b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add the `validation_resources` module to tempest.lib. The module provides
+ a set of helpers that can be used to provision and clean up all the
+ resources required to perform ping / ssh tests against a virtual machine:
+ a keypair, a security group with targeted rules and a floating IP.
diff --git a/releasenotes/notes/add-volume-group-snapshots-tempest-tests-840df3da26590f5e.yaml b/releasenotes/notes/add-volume-group-snapshots-tempest-tests-840df3da26590f5e.yaml
new file mode 100644
index 0000000..2ca6e5a
--- /dev/null
+++ b/releasenotes/notes/add-volume-group-snapshots-tempest-tests-840df3da26590f5e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the group_snapshots client for the volume service as a library.
+ Add tempest tests for create group snapshot, delete group snapshot, show
+ group snapshot, and list group snapshots volume APIs.
diff --git a/releasenotes/notes/add-volume-group-types-tempest-tests-1298ab8cb4fe8b7b.yaml b/releasenotes/notes/add-volume-group-types-tempest-tests-1298ab8cb4fe8b7b.yaml
new file mode 100644
index 0000000..4fd3bee
--- /dev/null
+++ b/releasenotes/notes/add-volume-group-types-tempest-tests-1298ab8cb4fe8b7b.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add list_group_type and show_group_type in the group_types client for
+ the volume service. Add tests for create/delete/show/list group types.
diff --git a/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml b/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml
new file mode 100644
index 0000000..94ab462
--- /dev/null
+++ b/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the proxy_url optional parameter to the get_credentials method in
+ tempest/lib/auth.py so that the helper can be used when going through
+ an HTTP proxy.
diff --git a/releasenotes/notes/compare-header-version-func-de5139b2161b3627.yaml b/releasenotes/notes/compare-header-version-func-de5139b2161b3627.yaml
new file mode 100644
index 0000000..305e756
--- /dev/null
+++ b/releasenotes/notes/compare-header-version-func-de5139b2161b3627.yaml
@@ -0,0 +1,15 @@
+---
+features:
+ - |
+ Add a new function called ``compare_version_header_to_response`` to
+ ``tempest.lib.common.api_version_utils``, which compares the API
+ microversion in the response header to another microversion using the
+ comparators defined in
+ ``tempest.lib.common.api_version_request.APIVersionRequest``.
+
+ It is now possible to determine how to retrieve an attribute from a
+ response body of an API call, depending on the returned microversion.
+
+ Add a new exception type called ``InvalidParam`` to
+ ``tempest.lib.exceptions``, allowing the possibility of raising an
+ exception if an invalid parameter is passed to a library function.
diff --git a/releasenotes/notes/credentials-factory-stable-c8037bd9ae642482.yaml b/releasenotes/notes/credentials-factory-stable-c8037bd9ae642482.yaml
new file mode 100644
index 0000000..6faa536
--- /dev/null
+++ b/releasenotes/notes/credentials-factory-stable-c8037bd9ae642482.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ The credentials_factory.py module is now marked as stable for Tempest
+ plugins. It provides helpers that can be used by Tempest plugins to
+ obtain test credentials for their test cases in a format that honors the
+ Tempest configuration in use.
+ Credentials may be provisioned on the fly during the test run, or they
+ can be set up in advance and fed to tests via a YAML file; they can be
+ set up for identity v2 or identity v3.
diff --git a/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml b/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml
new file mode 100644
index 0000000..e5d4ab7
--- /dev/null
+++ b/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ As of the Queens release, tempest no longer tests the identity v2.0 API
+ because the majority of the v2.0 API has been removed from the identity
+ project. Once the Queens release reaches end-of-life, we can remove the
+ v2.0 tempest tests and clean up v2.0 testing cruft.
diff --git a/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml b/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml
new file mode 100644
index 0000000..c9a49a7
--- /dev/null
+++ b/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml
@@ -0,0 +1,13 @@
+---
+upgrade:
+ - |
+ Replace any call in your code to credentials_factory.DEFAULT_PARAMS with
+ a call to config.service_client_config().
+fixes:
+ - |
+ The credentials_factory module used to load configuration at import time,
+ which caused configuration to be loaded at test discovery time.
+ This was fixed by removing the DEFAULT_PARAMS variable. This variable
+ was redundant (and outdated); the same dictionary (but up to date) can
+ be obtained by invoking config.service_client_config() with no service
+ parameter.
diff --git a/releasenotes/notes/extra-compute-services-tests-92b6c0618972e02f.yaml b/releasenotes/notes/extra-compute-services-tests-92b6c0618972e02f.yaml
new file mode 100644
index 0000000..414adf1
--- /dev/null
+++ b/releasenotes/notes/extra-compute-services-tests-92b6c0618972e02f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the ``disable_log_reason`` and the ``update_forced_down`` API endpoints
+ to the compute ``services_client``.
+ Add the '2.11' compute validation schema for the compute services API.
diff --git a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
new file mode 100644
index 0000000..775a383
--- /dev/null
+++ b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
+ The URL path for the list group snapshots with details API is changed from
+ ``?detail=True`` to ``/detail``.
diff --git a/releasenotes/notes/fix-remoteclient-default-ssh-shell-prologue-33e99343d086f601.yaml b/releasenotes/notes/fix-remoteclient-default-ssh-shell-prologue-33e99343d086f601.yaml
new file mode 100644
index 0000000..5063fd5
--- /dev/null
+++ b/releasenotes/notes/fix-remoteclient-default-ssh-shell-prologue-33e99343d086f601.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fix RemoteClient default ssh_shell_prologue: Bug#1707478
+
+ The default ssh_shell_prologue has been modified from
+ the erroneous PATH=$$PATH:/sbin to PATH=$PATH:/sbin.
diff --git a/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml b/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml
new file mode 100644
index 0000000..56969de
--- /dev/null
+++ b/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - Adds a new config option, ``proxy_url``. This option is used to configure
+ running tempest through a proxy server.
+ - The RestClient class in tempest.lib.rest_client has a new kwarg parameter,
+ ``proxy_url``, that is used to set a proxy server.
+ - A new class was added to tempest.lib.http, ClosingProxyHttp. This behaves
+ identically to ClosingHttp except that it requires a proxy URL and will
+ establish a connection through a proxy.
diff --git a/releasenotes/notes/identity-tests-domain-drivers-76235f6672221e45.yaml b/releasenotes/notes/identity-tests-domain-drivers-76235f6672221e45.yaml
new file mode 100644
index 0000000..7ed3081
--- /dev/null
+++ b/releasenotes/notes/identity-tests-domain-drivers-76235f6672221e45.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ A new boolean config option ``domain_specific_drivers``
+ is added to the section ``identity-feature-enabled``.
+ This option must be enabled when testing an environment that
+ is configured to use domain-specific identity drivers.
diff --git a/releasenotes/notes/identity_client-635275d43abbb807.yaml b/releasenotes/notes/identity_client-635275d43abbb807.yaml
new file mode 100644
index 0000000..6f984b7
--- /dev/null
+++ b/releasenotes/notes/identity_client-635275d43abbb807.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Enhances the v3 identity client with the ``check_token_existence``
+ endpoint, allowing users to check the existence of tokens.
diff --git a/releasenotes/notes/intermediate-pike-release-2ce492432ff8f012.yaml b/releasenotes/notes/intermediate-pike-release-2ce492432ff8f012.yaml
new file mode 100644
index 0000000..bfebcd9
--- /dev/null
+++ b/releasenotes/notes/intermediate-pike-release-2ce492432ff8f012.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+ This is an intermediate release during the Pike development cycle to
+ make new functionality available to plugins and other consumers.
diff --git a/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml b/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml
new file mode 100644
index 0000000..1493b0b
--- /dev/null
+++ b/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+ This is an intermediate release during the Queens development cycle to
+ make new functionality available to plugins and other consumers.
diff --git a/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml b/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml
new file mode 100644
index 0000000..0f104cf
--- /dev/null
+++ b/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add ``list_auth_domains`` API endpoint to the identity v3 client. This
+ allows the possibility of listing all domains a user has access to
+ via role assignments.
diff --git a/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
new file mode 100644
index 0000000..2bba952
--- /dev/null
+++ b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Define the object storage service clients below as libraries.
+ Add the new service clients to the library interface so that
+ other projects can use these modules as stable libraries
+ without any maintenance changes.
+
+ * account_client
+ * container_client
+ * object_client
diff --git a/releasenotes/notes/migrate-dynamic-creds-ecebb47528080761.yaml b/releasenotes/notes/migrate-dynamic-creds-ecebb47528080761.yaml
new file mode 100644
index 0000000..c20cbc6
--- /dev/null
+++ b/releasenotes/notes/migrate-dynamic-creds-ecebb47528080761.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ The tempest module tempest.common.dynamic_creds, which is used for
+ dynamically allocating credentials, has been migrated into tempest lib.
diff --git a/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml b/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml
new file mode 100644
index 0000000..72b8e26
--- /dev/null
+++ b/releasenotes/notes/migrate-object-storage-as-stable-interface-42014c7b43ecb254.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Define the object storage service clients below as libraries.
+ Add the new service clients to the library interface so that
+ other projects can use these modules as stable libraries
+ without any maintenance changes.
+
+ * bulk_middleware_client
+ * capabilities_client
diff --git a/releasenotes/notes/migrate-preprov-creds-ef61a046ee1ec604.yaml b/releasenotes/notes/migrate-preprov-creds-ef61a046ee1ec604.yaml
new file mode 100644
index 0000000..aa5f71a
--- /dev/null
+++ b/releasenotes/notes/migrate-preprov-creds-ef61a046ee1ec604.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - The tempest module tempest.common.preprov_creds which is used to provide
+ credentials from a list of preprovisioned resources has been migrated into
+ tempest lib at tempest.lib.common.preprov_creds.
+ - The InvalidTestResource exception class from tempest.exceptions has been
+ migrated into tempest.lib.exceptions.
+ - The tempest module tempest.common.fixed_network which provided utilities for
+ finding fixed networks by name and helpers for picking the network to use when
+ multiple tenant networks are available has been migrated into tempest lib
+ at tempest.lib.common.fixed_network.
diff --git a/releasenotes/notes/plugin-client-registration-enhancements-e09131742391225b.yaml b/releasenotes/notes/plugin-client-registration-enhancements-e09131742391225b.yaml
new file mode 100644
index 0000000..b6391b6
--- /dev/null
+++ b/releasenotes/notes/plugin-client-registration-enhancements-e09131742391225b.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ When registering service clients from installed plugins, all registrations
+ are now processed, even if one or more fails. All exceptions encountered
+ during the registration process are recorded. If at least one exception
+ was encountered, the registration process fails and all interim errors are
+ reported.
+ - |
+ The __repr__ method is now implemented for the base `tempest.Exception`
+ class; its implementation is identical to __str__: it reports the error
+ message merged with the input parameters.
diff --git a/releasenotes/notes/raise-exception-when-error-deleting-on-volume-18d0d0c5886212dd.yaml b/releasenotes/notes/raise-exception-when-error-deleting-on-volume-18d0d0c5886212dd.yaml
new file mode 100644
index 0000000..194dbc1
--- /dev/null
+++ b/releasenotes/notes/raise-exception-when-error-deleting-on-volume-18d0d0c5886212dd.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Tempest checks that a volume is deleted by waiting for NotFound(404) on
+ show_volume(). Sometimes a volume delete fails and the volume status
+ becomes error_deleting, which means the delete has failed, so Tempest
+ does not need to keep waiting. A new release of Tempest raises a
+ DeleteErrorException in this case instead of waiting.
diff --git a/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml b/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml
new file mode 100644
index 0000000..c75da2e
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+ Remove deprecated APIs from the volume v2 volumes_client; the deprecated
+ APIs are now implemented in the volume v2 transfers_client.
+
+ * create_volume_transfer
+ * show_volume_transfer
+ * list_volume_transfers
+ * delete_volume_transfer
+ * accept_volume_transfer
diff --git a/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml b/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml
new file mode 100644
index 0000000..920bc5d
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-skip-decorators-f8b42d812d20b537.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Remove two deprecated skip decorators in ``config`` module:
+ ``skip_unless_config`` and ``skip_if_config``.
diff --git a/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
new file mode 100644
index 0000000..621731d
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Remove the deprecated decorator ``skip_unless_attr`` in lib/decorators.py.
diff --git a/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml b/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml
new file mode 100644
index 0000000..12ac5b5
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove deprecated APIs (``show_pools`` and ``show_backend_capabilities``)
+ from the volume v2 volumes_client; the deprecated APIs are now implemented
+ in the volume v2 scheduler_stats_client (``list_pools``) and
+ capabilities_client (``show_backend_capabilities``) respectively.
diff --git a/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml b/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml
new file mode 100644
index 0000000..609000c
--- /dev/null
+++ b/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Remove the deprecated get_ipv6_addr_by_EUI64 method from data_utils.
+ Use the same method from oslo_utils.netutils instead.
diff --git a/releasenotes/notes/remove-support-of-py34-7d59fdb431fefe24.yaml b/releasenotes/notes/remove-support-of-py34-7d59fdb431fefe24.yaml
new file mode 100644
index 0000000..093228a
--- /dev/null
+++ b/releasenotes/notes/remove-support-of-py34-7d59fdb431fefe24.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - |
+ Remove support for python3.4, because only python3.5 is available
+ in Ubuntu Xenial (python3.4 is restricted to <= Mitaka).
diff --git a/releasenotes/notes/start-of-pike-support-f2a1b7ea8e8b0311.yaml b/releasenotes/notes/start-of-pike-support-f2a1b7ea8e8b0311.yaml
new file mode 100644
index 0000000..0787821
--- /dev/null
+++ b/releasenotes/notes/start-of-pike-support-f2a1b7ea8e8b0311.yaml
@@ -0,0 +1,11 @@
+---
+prelude: >
+ This release marks the start of support for the Pike release in Tempest.
+other:
+ - OpenStack Releases supported after this release are **Pike**, **Ocata**,
+ and **Newton**.
+
+ The release under development as of this tag is Queens, meaning
+ that every Tempest commit is also tested against master during the Queens
+ cycle. However, this does not necessarily mean that using Tempest as of
+ this tag will work against a Queens (or future release) cloud.
diff --git a/releasenotes/notes/tempest-identity-catalog-client-f5c8589a9d7c1eb5.yaml b/releasenotes/notes/tempest-identity-catalog-client-f5c8589a9d7c1eb5.yaml
new file mode 100644
index 0000000..dcaaceb
--- /dev/null
+++ b/releasenotes/notes/tempest-identity-catalog-client-f5c8589a9d7c1eb5.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add a new identity catalog client. At this point, the new client
+ contains a single functionality, "show_catalog", which returns a
+ catalog object.
diff --git a/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml b/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml
new file mode 100644
index 0000000..e27ee33
--- /dev/null
+++ b/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Two extra modules are now marked as stable for plugins: test.py and clients.py.
+ The former includes the test base class with its automatic credentials
+ provisioning and test resource managing fixtures.
+ The latter is built on top of ServiceClients and adds aliases and a few custom
+ configurations to it.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 3137541..57ec7e1 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -65,16 +65,12 @@
project = u'tempest Release Notes'
copyright = u'2016, tempest Developers'
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-from tempest.version import version_info as tempest_version
+# Release notes do not need a version number in the title, they
+# cover multiple versions.
# The full version, including alpha/beta/rc tags.
-release = tempest_version.version_string_with_vcs()
+release = ''
# The short X.Y version.
-version = tempest_version.canonical_version_string()
+version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -158,10 +154,6 @@
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index d2be814..df1de46 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,8 @@
:maxdepth: 1
unreleased
+ v17.0.0
+ v16.1.0
v16.0.0
v15.0.0
v14.0.0
diff --git a/releasenotes/source/v16.1.0.rst b/releasenotes/source/v16.1.0.rst
new file mode 100644
index 0000000..e24a70f
--- /dev/null
+++ b/releasenotes/source/v16.1.0.rst
@@ -0,0 +1,6 @@
+=====================
+v16.1.0 Release Notes
+=====================
+
+.. release-notes:: 16.1.0 Release Notes
+ :version: 16.1.0
diff --git a/releasenotes/source/v17.0.0.rst b/releasenotes/source/v17.0.0.rst
new file mode 100644
index 0000000..3f50f11
--- /dev/null
+++ b/releasenotes/source/v17.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v17.0.0 Release Notes
+=====================
+
+.. release-notes:: 17.0.0 Release Notes
+ :version: 17.0.0
diff --git a/requirements.txt b/requirements.txt
index 259a4cf..2300214 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,24 +2,24 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-cliff>=2.6.0 # Apache-2.0
-jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
-testtools>=1.4.0 # MIT
-paramiko>=2.0 # LGPLv2.1+
-netaddr!=0.7.16,>=0.7.13 # BSD
+cliff!=2.9.0,>=2.8.0 # Apache-2.0
+jsonschema<3.0.0,>=2.6.0 # MIT
+testtools>=2.2.0 # MIT
+paramiko>=2.0.0 # LGPLv2.1+
+netaddr>=0.7.18 # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
-oslo.concurrency>=3.8.0 # Apache-2.0
-oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0
-oslo.log>=3.22.0 # Apache-2.0
-oslo.serialization>=1.10.0 # Apache-2.0
-oslo.utils>=3.20.0 # Apache-2.0
-six>=1.9.0 # MIT
+oslo.concurrency>=3.20.0 # Apache-2.0
+oslo.config>=5.1.0 # Apache-2.0
+oslo.log>=3.30.0 # Apache-2.0
+oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
+oslo.utils>=3.31.0 # Apache-2.0
+six>=1.10.0 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
-PyYAML>=3.10.0 # MIT
-python-subunit>=0.0.18 # Apache-2.0/BSD
+PyYAML>=3.10 # MIT
+python-subunit>=1.0.0 # Apache-2.0/BSD
stevedore>=1.20.0 # Apache-2.0
PrettyTable<0.8,>=0.7.1 # BSD
-os-testr>=0.8.0 # Apache-2.0
+os-testr>=1.0.0 # Apache-2.0
urllib3>=1.21.1 # MIT
debtcollector>=1.2.0 # Apache-2.0
-unittest2 # BSD
+unittest2>=1.1.0 # BSD
diff --git a/roles/acl-devstack-files/README.rst b/roles/acl-devstack-files/README.rst
new file mode 100644
index 0000000..76e7e58
--- /dev/null
+++ b/roles/acl-devstack-files/README.rst
@@ -0,0 +1,10 @@
+Grant global read access to the devstack `files` folder.
+
+This is handy to grant the `tempest` user access to VM images for testing.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_data_dir
+ :default: /opt/stack/data
+
+ The devstack data directory.
diff --git a/roles/acl-devstack-files/defaults/main.yaml b/roles/acl-devstack-files/defaults/main.yaml
new file mode 100644
index 0000000..14265f0
--- /dev/null
+++ b/roles/acl-devstack-files/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_data_dir: /opt/stack/data
diff --git a/roles/acl-devstack-files/tasks/main.yaml b/roles/acl-devstack-files/tasks/main.yaml
new file mode 100644
index 0000000..b3eeec7
--- /dev/null
+++ b/roles/acl-devstack-files/tasks/main.yaml
@@ -0,0 +1,6 @@
+- name: Grant global read access to devstack files
+ file:
+ path: "{{devstack_data_dir}}/files"
+ mode: "o+rx"
+ recurse: yes
+ become: yes
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
new file mode 100644
index 0000000..b05326d
--- /dev/null
+++ b/roles/process-stackviz/README.rst
@@ -0,0 +1,22 @@
+Generate a stackviz report.
+
+Generate a stackviz report from subunit and dstat data, using
+the stackviz archive embedded in test images.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: stage_dir
+ :default: /opt/stack/logs
+
+ The stage directory where the input data can be found and
+ the output will be produced.
+
+.. zuul:rolevar:: test_results_stage_name
+ :default: test_results
+
+ The name of the subunit file to be used as input.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
new file mode 100644
index 0000000..b1eb8d9
--- /dev/null
+++ b/roles/process-stackviz/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+stage_dir: /opt/stack/
+test_results_stage_name: test_results
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
new file mode 100644
index 0000000..09de606
--- /dev/null
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -0,0 +1,63 @@
+- name: Check if stackviz archive exists
+ stat:
+ path: "/opt/cache/files/stackviz-latest.tar.gz"
+ register: stackviz_archive
+
+- debug:
+ msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
+ when: not stackviz_archive.stat.exists
+
+- name: Check if subunit data exists
+ stat:
+ path: "{{ stage_dir }}/{{ test_results_stage_name }}.subunit"
+ register: subunit_input
+
+- debug:
+ msg: "Subunit file could not be found at {{ stage_dir }}/{{ test_results_stage_name }}.subunit"
+ when: not subunit_input.stat.exists
+
+- name: Install stackviz
+ pip:
+ name: "file://{{ stackviz_archive.stat.path }}"
+ virtualenv: /tmp/stackviz
+ extra_args: -U
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Deploy stackviz static html+js
+ command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Check if dstat data exists
+ stat:
+ path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
+ register: dstat_input
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Run stackviz with dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - dstat_input.stat.exists
+
+- name: Run stackviz without dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - not dstat_input.stat.exists
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
new file mode 100644
index 0000000..001586e
--- /dev/null
+++ b/roles/run-tempest/README.rst
@@ -0,0 +1,25 @@
+Run Tempest
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+ :default: 0
+
+ The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+ :default: ''
+
+ A regular expression used to select the tests.
+ It works only when used with some specific tox environments
+ ('all', 'all-plugin').
+
+.. zuul:rolevar:: tox_venvlist
+ :default: smoke
+
+ The Tempest tox environment to run.
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
new file mode 100644
index 0000000..3e57511
--- /dev/null
+++ b/roles/run-tempest/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_venvlist: smoke
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
new file mode 100644
index 0000000..297cd72
--- /dev/null
+++ b/roles/run-tempest/tasks/main.yaml
@@ -0,0 +1,28 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to ansible_processor_cores
+- name: Get hw.logicalcpu from sysctl
+ shell: sysctl hw.logicalcpu | cut -d' ' -f2
+ register: sysctl_hw_logicalcpu
+ when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+ set_fact:
+ num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+ set_fact:
+ default_concurrency: "{{ num_cores }}"
+ when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+ set_fact:
+ default_concurrency: "{{ num_cores|int // 2 }}"
+ when: num_cores|int > 3
+
+- name: Run Tempest
+ command: tox -e {{tox_venvlist}} -- {{tempest_test_regex|quote}} --concurrency={{tempest_concurrency|default(default_concurrency)}}
+ args:
+ chdir: "{{devstack_base_dir}}/tempest"
+ become: true
+ become_user: tempest
diff --git a/roles/setup-tempest-data-dir/README.rst b/roles/setup-tempest-data-dir/README.rst
new file mode 100644
index 0000000..db0b083
--- /dev/null
+++ b/roles/setup-tempest-data-dir/README.rst
@@ -0,0 +1,12 @@
+Set the `tempest` user as the owner of Tempest's data folder.
+
+Tempest's devstack plugin creates the data folder, but it has no knowledge
+of the `tempest` user, so we need a role to fix ownership on the data folder.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_data_dir
+ :default: /opt/stack/data
+
+ The devstack data directory.
diff --git a/roles/setup-tempest-data-dir/defaults/main.yaml b/roles/setup-tempest-data-dir/defaults/main.yaml
new file mode 100644
index 0000000..14265f0
--- /dev/null
+++ b/roles/setup-tempest-data-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_data_dir: /opt/stack/data
diff --git a/roles/setup-tempest-data-dir/tasks/main.yaml b/roles/setup-tempest-data-dir/tasks/main.yaml
new file mode 100644
index 0000000..9dd6309
--- /dev/null
+++ b/roles/setup-tempest-data-dir/tasks/main.yaml
@@ -0,0 +1,7 @@
+- name: Set tempest as owner of Tempest data folder
+ file:
+ path: "{{devstack_data_dir}}/tempest"
+ owner: tempest
+ group: stack
+ recurse: yes
+ become: yes
diff --git a/roles/setup-tempest-run-dir/README.rst b/roles/setup-tempest-run-dir/README.rst
new file mode 100644
index 0000000..c8e2339
--- /dev/null
+++ b/roles/setup-tempest-run-dir/README.rst
@@ -0,0 +1,14 @@
+Set up the Tempest run folder.
+
+To support isolation between multiple runs, separate run folders are required.
+Set `tempest` as owner of Tempest's current run folder.
+There is an implicit assumption here of a one-to-one relationship between
+devstack versions and Tempest runs.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/setup-tempest-run-dir/defaults/main.yaml b/roles/setup-tempest-run-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-tempest-run-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-tempest-run-dir/tasks/main.yaml b/roles/setup-tempest-run-dir/tasks/main.yaml
new file mode 100644
index 0000000..a012d72
--- /dev/null
+++ b/roles/setup-tempest-run-dir/tasks/main.yaml
@@ -0,0 +1,7 @@
+- name: Set tempest as owner of Tempest run folder
+ file:
+ path: "{{devstack_base_dir}}/tempest"
+ owner: tempest
+ group: stack
+ recurse: yes
+ become: yes
diff --git a/setup.cfg b/setup.cfg
index b292970..04bb29f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
-home-page = http://docs.openstack.org/developer/tempest/
+home-page = https://docs.openstack.org/tempest/latest/
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
@@ -16,7 +16,6 @@
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
[files]
diff --git a/tempest/README.rst b/tempest/README.rst
index 0feec41..663653e 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -9,12 +9,13 @@
OpenStack clouds.
As such Tempest tests come in many flavors, each with their own rules
-and guidelines. Below is the proposed Havana restructuring for Tempest
+and guidelines. Below is an overview of the Tempest repository structure
to make this clear.
| tempest/
| api/ - API tests
| scenario/ - complex scenario tests
+| tests/ - unit tests for Tempest internals
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
@@ -24,8 +25,8 @@
----------------------
API tests are validation tests for the OpenStack API. They should not
-use the existing python clients for OpenStack, but should instead use
-the tempest implementations of clients. Having raw clients let us
+use the existing Python clients for OpenStack, but should instead use
+the Tempest implementations of clients. Having raw clients lets us
pass invalid JSON to the APIs and see the results, something we could
not get with the native clients.
@@ -41,14 +42,14 @@
functionality. They are typically a series of steps where complicated
state requiring multiple services is set up exercised, and torn down.
-Scenario tests should not use the existing python clients for OpenStack,
-but should instead use the tempest implementations of clients.
+Scenario tests should not use the existing Python clients for OpenStack,
+but should instead use the Tempest implementations of clients.
:ref:`unit_tests_field_guide`
-----------------------------
Unit tests are the self checks for Tempest. They provide functional
-verification and regression checking for the internal components of tempest.
-They should be used to just verify that the individual pieces of tempest are
+verification and regression checking for the internal components of Tempest.
+They should be used to just verify that the individual pieces of Tempest are
working as expected.
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 91e6ad6..a796922 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -13,7 +13,8 @@
It's also important to test not only the expected positive path on
APIs, but also to provide them with invalid data to ensure they fail
-in expected and documented ways. Over the course of the OpenStack
+in expected and documented ways. The latter type of tests is called
+``negative tests`` in the Tempest source code. Over the course of the OpenStack
project Tempest has discovered many fundamental bugs by doing just
this.
@@ -22,7 +23,7 @@
spinning up a server, image, etc, then operating on it.
-Why are these tests in tempest?
+Why are these tests in Tempest?
-------------------------------
This is one of the core missions for the Tempest project, and where it
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 69cbfb5..0901374 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -86,7 +86,7 @@
body = self.client.create_agent(**self.params_agent)['agent']
self.addCleanup(self.client.delete_agent, body['agent_id'])
agents = self.client.list_agents()['agents']
- self.assertNotEmpty(agents, 'Cannot get any agents.(%s)' % agents)
+ self.assertNotEmpty(agents, 'Cannot get any agents.')
self.assertIn(body['agent_id'], map(lambda x: x['agent_id'], agents))
@decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
@@ -104,7 +104,7 @@
agent_id_xen = agent_xen['agent_id']
agents = (self.client.list_agents(hypervisor=agent_xen['hypervisor'])
['agents'])
- self.assertNotEmpty(agents, 'Cannot get any agents.(%s)' % agents)
+ self.assertNotEmpty(agents, 'Cannot get any agents.')
self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
agents))
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 902ea9a..57d3983 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -125,7 +125,6 @@
name=aggregate_name, availability_zone=az_name)
self.assertEqual(az_name, aggregate['availability_zone'])
- self.assertIsNotNone(aggregate['id'])
aggregate_id = aggregate['id']
new_aggregate_name = aggregate_name + '_new'
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index 41be620..36ff09e 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -27,7 +27,6 @@
def setup_clients(cls):
super(AggregatesAdminNegativeTestJSON, cls).setup_clients()
cls.client = cls.os_admin.aggregates_client
- cls.user_client = cls.aggregates_client
@classmethod
def resource_setup(cls):
@@ -52,7 +51,7 @@
# Regular user is not allowed to create an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
self.assertRaises(lib_exc.Forbidden,
- self.user_client.create_aggregate,
+ self.aggregates_client.create_aggregate,
name=aggregate_name)
@decorators.attr(type=['negative'])
@@ -87,7 +86,7 @@
# Regular user is not allowed to delete an aggregate.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.delete_aggregate,
+ self.aggregates_client.delete_aggregate,
aggregate['id'])
@decorators.attr(type=['negative'])
@@ -95,7 +94,7 @@
def test_aggregate_list_as_user(self):
# Regular user is not allowed to list aggregates.
self.assertRaises(lib_exc.Forbidden,
- self.user_client.list_aggregates)
+ self.aggregates_client.list_aggregates)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('557cad12-34c9-4ff4-95f0-22f0dfbaf7dc')
@@ -103,7 +102,7 @@
# Regular user is not allowed to get aggregate details.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.show_aggregate,
+ self.aggregates_client.show_aggregate,
aggregate['id'])
@decorators.attr(type=['negative'])
@@ -140,7 +139,7 @@
# Regular user is not allowed to add a host to an aggregate.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.add_host,
+ self.aggregates_client.add_host,
aggregate['id'], host=self.host)
@decorators.attr(type=['negative'])
@@ -168,7 +167,7 @@
host=self.host)
self.assertRaises(lib_exc.Forbidden,
- self.user_client.remove_host,
+ self.aggregates_client.remove_host,
aggregate['id'], host=self.host)
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index 83fe215..a9772c4 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -16,12 +16,11 @@
from tempest.api.compute import base
from tempest.common import compute
-from tempest.common import credentials_factory as credentials
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_excs
-from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
@@ -46,14 +45,10 @@
@classmethod
def skip_checks(cls):
super(AutoAllocateNetworkTest, cls).skip_checks()
- identity_version = cls.get_identity_version()
- if not credentials.is_admin_available(
- identity_version=identity_version):
- msg = "Missing Identity Admin API credentials in configuration."
- raise cls.skipException(msg)
if not CONF.service_available.neutron:
raise cls.skipException('Neutron is required')
- if not test.is_extension_enabled('auto-allocated-topology', 'network'):
+ if not utils.is_extension_enabled('auto-allocated-topology',
+ 'network'):
raise cls.skipException(
'auto-allocated-topology extension is not available')
@@ -148,6 +143,8 @@
test_utils.call_and_ignore_notfound_exc(
cls.networks_client.delete_network, network['id'])
+ super(AutoAllocateNetworkTest, cls).resource_cleanup()
+
@decorators.idempotent_id('5eb7b8fa-9c23-47a2-9d7d-02ed5809dd34')
def test_server_create_no_allocate(self):
"""Tests that no networking is allocated for the server."""
@@ -180,9 +177,11 @@
_, servers = compute.create_test_server(
self.os_primary, networks='auto', wait_until='ACTIVE',
min_count=3)
- server_nets = set()
for server in servers:
self.addCleanup(self.delete_server, server['id'])
+
+ server_nets = set()
+ for server in servers:
# get the server ips
addresses = self.servers_client.list_addresses(
server['id'])['addresses']
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 3449aba..08b2d19 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -17,16 +17,16 @@
from tempest.api.compute import base
from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
- disk_config = 'AUTO'
-
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
@@ -37,12 +37,6 @@
super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- @classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
-
- super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
-
@decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
@@ -69,20 +63,30 @@
admin_pass = self.image_ssh_password
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server_no_eph_disk = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_no_eph_disk['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ server_no_eph_disk['id'])
+
# Get partition number of server without ephemeral disk.
server_no_eph_disk = self.client.show_server(
server_no_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server_no_eph_disk),
+ self.get_server_ip(server_no_eph_disk,
+ validation_resources),
self.ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server_no_eph_disk,
servers_client=self.client)
disks_num = len(linux_client.get_disks().split('\n'))
@@ -92,17 +96,25 @@
server_with_eph_disk = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_with_eph_disk['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ server_with_eph_disk['id'])
+
server_with_eph_disk = self.client.show_server(
server_with_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server_with_eph_disk),
+ self.get_server_ip(server_with_eph_disk,
+ validation_resources),
self.ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server_with_eph_disk,
servers_client=self.client)
disks_num_eph = len(linux_client.get_disks().split('\n'))
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 1e09eeb..66c2c2d 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -29,6 +29,8 @@
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
+ if not utils.get_service_list()['network']:
+ raise cls.skipException("network service not enabled.")
@classmethod
def setup_clients(cls):
@@ -40,6 +42,7 @@
super(FixedIPsTestJson, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.show_server(server['id'])['server']
+ cls.ip = None
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
@@ -47,19 +50,19 @@
break
if cls.ip:
break
+ if cls.ip is None:
+ raise cls.skipException("No fixed ip found for server: %s"
+ % server['id'])
@decorators.idempotent_id('16b7d848-2f7c-4709-85a3-2dfb4576cc52')
- @test.services('network')
def test_list_fixed_ip_details(self):
fixed_ip = self.client.show_fixed_ip(self.ip)
self.assertEqual(fixed_ip['fixed_ip']['address'], self.ip)
@decorators.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
- @test.services('network')
def test_set_reserve(self):
self.client.reserve_fixed_ip(self.ip, reserve="None")
@decorators.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
- @test.services('network')
def test_set_unreserve(self):
self.client.reserve_fixed_ip(self.ip, unreserve="None")
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index a77011e..7d41f46 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -13,10 +13,10 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -29,6 +29,8 @@
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
+ if not utils.get_service_list()['network']:
+ raise cls.skipException("network service not enabled.")
@classmethod
def setup_clients(cls):
@@ -41,6 +43,7 @@
super(FixedIPsNegativeTestJson, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.show_server(server['id'])['server']
+ cls.ip = None
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
@@ -48,17 +51,18 @@
break
if cls.ip:
break
+ if cls.ip is None:
+ raise cls.skipException("No fixed ip found for server: %s"
+ % server['id'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f17f47d-daad-4adc-986e-12370c93e407')
- @test.services('network')
def test_list_fixed_ip_details_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.show_fixed_ip, self.ip)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ce60042c-fa60-4836-8d43-1c8e3359dc47')
- @test.services('network')
def test_set_reserve_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reserve_fixed_ip,
@@ -66,7 +70,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f1f7a35b-0390-48c5-9803-5f27461439db')
- @test.services('network')
def test_set_unreserve_with_non_admin_user(self):
self.assertRaises(lib_exc.Forbidden,
self.non_admin_client.reserve_fixed_ip,
@@ -74,7 +77,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f51cf464-7fc5-4352-bc3e-e75cfa2cb717')
- @test.services('network')
def test_set_reserve_with_invalid_ip(self):
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
@@ -87,7 +89,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('fd26ef50-f135-4232-9d32-281aab3f9176')
- @test.services('network')
def test_fixed_ip_with_invalid_action(self):
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_fixed_ip,
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 36ebc25..1483c2e 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -16,10 +16,10 @@
import uuid
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class FlavorsAdminTestJSON(base.BaseV2ComputeAdminTest):
@@ -28,7 +28,7 @@
@classmethod
def skip_checks(cls):
super(FlavorsAdminTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+ if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 2c236ec..b8e2b42 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -14,8 +14,8 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class FlavorsAccessTestJSON(base.BaseV2ComputeAdminTest):
@@ -27,7 +27,7 @@
@classmethod
def skip_checks(cls):
super(FlavorsAccessTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+ if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index be165cb..45ca10a 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class FlavorsAccessNegativeTestJSON(base.BaseV2ComputeAdminTest):
@@ -30,7 +30,7 @@
@classmethod
def skip_checks(cls):
super(FlavorsAccessNegativeTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+ if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 747cb42..4d27a22 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class FlavorsExtraSpecsTestJSON(base.BaseV2ComputeAdminTest):
@@ -29,7 +29,7 @@
@classmethod
def skip_checks(cls):
super(FlavorsExtraSpecsTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+ if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
@@ -53,12 +53,11 @@
ephemeral=ephemeral,
swap=swap,
rxtx_factor=rxtx)['flavor']
-
- @classmethod
- def resource_cleanup(cls):
- cls.admin_flavors_client.delete_flavor(cls.flavor['id'])
- cls.admin_flavors_client.wait_for_resource_deletion(cls.flavor['id'])
- super(FlavorsExtraSpecsTestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(
+ cls.admin_flavors_client.wait_for_resource_deletion,
+ cls.flavor['id'])
+ cls.addClassResourceCleanup(cls.admin_flavors_client.delete_flavor,
+ cls.flavor['id'])
@decorators.idempotent_id('0b2f9d4b-1ca2-4b99-bb40-165d4bb94208')
def test_flavor_set_get_update_show_unset_keys(self):
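The cleanup rewrite above relies on class resource cleanups running in LIFO order: wait_for_resource_deletion is registered first so that, at teardown, delete_flavor runs before the wait. A hedged sketch of the same ordering for a flavor created in resource_setup (the flavor arguments are illustrative):

@classmethod
def resource_setup(cls):
    super(FlavorsExtraSpecsTestJSON, cls).resource_setup()
    flavor = cls.admin_flavors_client.create_flavor(
        name='sketch-flavor', ram=512, vcpus=1, disk=1)['flavor']
    # Registered first, executed last: wait for the deletion to finish.
    cls.addClassResourceCleanup(
        cls.admin_flavors_client.wait_for_resource_deletion, flavor['id'])
    # Registered last, executed first: issue the delete itself.
    cls.addClassResourceCleanup(
        cls.admin_flavors_client.delete_flavor, flavor['id'])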
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index f39feb9..5cde39e 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -15,10 +15,10 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class FlavorsExtraSpecsNegativeTestJSON(base.BaseV2ComputeAdminTest):
@@ -30,7 +30,7 @@
@classmethod
def skip_checks(cls):
super(FlavorsExtraSpecsNegativeTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
+ if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
@@ -55,12 +55,11 @@
ephemeral=ephemeral,
swap=swap,
rxtx_factor=rxtx)['flavor']
-
- @classmethod
- def resource_cleanup(cls):
- cls.admin_flavors_client.delete_flavor(cls.flavor['id'])
- cls.admin_flavors_client.wait_for_resource_deletion(cls.flavor['id'])
- super(FlavorsExtraSpecsNegativeTestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(
+ cls.admin_flavors_client.wait_for_resource_deletion,
+ cls.flavor['id'])
+ cls.addClassResourceCleanup(cls.admin_flavors_client.delete_flavor,
+ cls.flavor['id'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index 496f119..ba19937 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -16,11 +16,11 @@
import netaddr
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
-from tempest import test
CONF = config.CONF
@@ -57,7 +57,7 @@
return
@decorators.idempotent_id('2c8f145f-8012-4cb8-ac7e-95a587f0e4ab')
- @test.services('network')
+ @utils.services('network')
def test_create_list_delete_floating_ips_bulk(self):
# Create, List and delete the Floating IPs Bulk
pool = 'test_pool'
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 0e1e7ed..00f3256 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -65,9 +65,4 @@
resources = self.client.show_host(hostname)['host']
self.assertNotEmpty(resources)
host_resource = resources[0]['resource']
- self.assertIsNotNone(host_resource)
- self.assertIsNotNone(host_resource['cpu'])
- self.assertIsNotNone(host_resource['disk_gb'])
- self.assertIsNotNone(host_resource['memory_mb'])
- self.assertIsNotNone(host_resource['project'])
self.assertEqual(hostname, host_resource['host'])
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 0db802c..404fd94 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -30,26 +30,23 @@
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
- def assertHypervisors(self, hypers):
- self.assertNotEmpty(hypers, "No hypervisors found: %s" % hypers)
-
@decorators.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
@decorators.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of the all hypervisor
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
@decorators.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertNotEmpty(details)
@@ -60,7 +57,7 @@
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
@@ -116,7 +113,7 @@
@decorators.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
- self.assertHypervisors(hypers)
+ self.assertNotEmpty(hypers, "No hypervisors found.")
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 3859e64..411159b 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -20,22 +20,22 @@
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
-class LiveBlockMigrationTestJSON(base.BaseV2ComputeAdminTest):
+class LiveMigrationTest(base.BaseV2ComputeAdminTest):
max_microversion = '2.24'
block_migration = None
@classmethod
def skip_checks(cls):
- super(LiveBlockMigrationTestJSON, cls).skip_checks()
+ super(LiveMigrationTest, cls).skip_checks()
if not CONF.compute_feature_enabled.live_migration:
skip_msg = ("%s skipped as live-migration is "
@@ -46,26 +46,21 @@
"Less than 2 compute nodes, skipping migration test.")
@classmethod
- def setup_clients(cls):
- super(LiveBlockMigrationTestJSON, cls).setup_clients()
- cls.admin_hosts_client = cls.os_admin.hosts_client
- cls.admin_migration_client = cls.os_admin.migrations_client
+ def setup_credentials(cls):
 + # These tests don't attempt any SSH validation nor do they use
 + # floating IPs on the instance, so all we need is a network and
 + # a subnet so that the instance being migrated has a single port;
 + # we need that port to make sure its host binding is properly
 + # updated during the live migration.
+ # TODO(mriedem): SSH validation before and after the instance is
+ # live migrated would be a nice test wrinkle addition.
+ cls.set_network_resources(network=True, subnet=True)
+ super(LiveMigrationTest, cls).setup_credentials()
@classmethod
- def _get_compute_hostnames(cls):
- body = cls.admin_hosts_client.list_hosts()['hosts']
- return [
- host_record['host_name']
- for host_record in body
- if host_record['service'] == 'compute'
- ]
-
- def _get_server_details(self, server_id):
- body = self.admin_servers_client.show_server(server_id)['server']
- return body
-
- def _get_host_for_server(self, server_id):
- return self._get_server_details(server_id)['OS-EXT-SRV-ATTR:host']
+ def setup_clients(cls):
+ super(LiveMigrationTest, cls).setup_clients()
+ cls.admin_migration_client = cls.os_admin.migrations_client
def _migrate_server_to(self, server_id, dest_host, volume_backed=False):
kwargs = dict()
@@ -79,11 +74,6 @@
server_id, host=dest_host, block_migration=block_migration,
**kwargs)
- def _get_host_other_than(self, host):
- for target_host in self._get_compute_hostnames():
- if host != target_host:
- return target_host
-
def _live_migrate(self, server_id, target_host, state,
volume_backed=False):
self._migrate_server_to(server_id, target_host, volume_backed)
@@ -97,7 +87,7 @@
if (live_migration['instance_uuid'] == server_id):
msg += "\n%s" % live_migration
msg += "]"
- self.assertEqual(target_host, self._get_host_for_server(server_id),
+ self.assertEqual(target_host, self.get_host_for_server(server_id),
msg)
def _test_live_migration(self, state='ACTIVE', volume_backed=False):
@@ -114,8 +104,8 @@
# Live migrate an instance to another host
server_id = self.create_test_server(wait_until="ACTIVE",
volume_backed=volume_backed)['id']
- source_host = self._get_host_for_server(server_id)
- destination_host = self._get_host_other_than(source_host)
+ source_host = self.get_host_for_server(server_id)
+ destination_host = self.get_host_other_than(server_id)
if state == 'PAUSED':
self.admin_servers_client.pause_server(server_id)
@@ -144,7 +134,7 @@
@decorators.skip_because(bug="1524898")
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
- @test.services('volume')
+ @utils.services('volume')
def test_volume_backed_live_migration(self):
self._test_live_migration(volume_backed=True)
@@ -158,8 +148,7 @@
def test_iscsi_volume(self):
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
- actual_host = self._get_host_for_server(server_id)
- target_host = self._get_host_other_than(actual_host)
+ target_host = self.get_host_other_than(server_id)
volume = self.create_volume()
@@ -174,11 +163,11 @@
server = self.admin_servers_client.show_server(server_id)['server']
volume_id2 = server["os-extended-volumes:volumes_attached"][0]["id"]
- self.assertEqual(target_host, self._get_host_for_server(server_id))
+ self.assertEqual(target_host, self.get_host_for_server(server_id))
self.assertEqual(volume_id1, volume_id2)
-class LiveBlockMigrationRemoteConsolesV26TestJson(LiveBlockMigrationTestJSON):
+class LiveMigrationRemoteConsolesV26Test(LiveMigrationTest):
min_microversion = '2.6'
max_microversion = 'latest'
@@ -186,7 +175,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.serial_console,
'Serial console not supported.')
@testtools.skipUnless(
- test.is_scheduler_filter_enabled("DifferentHostFilter"),
+ compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_live_migration_serial_console(self):
"""Test the live-migration of an instance which has a serial console
@@ -201,8 +190,8 @@
hints = {'different_host': server01_id}
server02_id = self.create_test_server(scheduler_hints=hints,
wait_until='ACTIVE')['id']
- host01_id = self._get_host_for_server(server01_id)
- host02_id = self._get_host_for_server(server02_id)
+ host01_id = self.get_host_for_server(server01_id)
+ host02_id = self.get_host_for_server(server02_id)
self.assertNotEqual(host01_id, host02_id)
# At this step we have 2 instances on different hosts, both with
@@ -216,7 +205,7 @@
self._migrate_server_to(server01_id, host02_id)
waiters.wait_for_server_status(self.servers_client,
server01_id, 'ACTIVE')
- self.assertEqual(host02_id, self._get_host_for_server(server01_id))
+ self.assertEqual(host02_id, self.get_host_for_server(server01_id))
self._verify_console_interaction(server01_id)
# At this point, both instances have a valid serial console
# connection, which means the ports got updated.
@@ -252,7 +241,7 @@
self.assertIn(data, console_output)
-class LiveAutoBlockMigrationV225TestJSON(LiveBlockMigrationTestJSON):
+class LiveAutoBlockMigrationV225Test(LiveMigrationTest):
min_microversion = '2.25'
max_microversion = 'latest'
block_migration = 'auto'
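The migration flow above now delegates host discovery to the get_host_for_server and get_host_other_than helpers that this patch adds to the compute base class. A hedged sketch of how a test combines them (assumes at least two enabled compute hosts and a microversion of 2.25 or later for block_migration='auto'; names other than the helpers are illustrative):

def test_live_migrate_sketch(self):
    server_id = self.create_test_server(wait_until='ACTIVE')['id']
    source_host = self.get_host_for_server(server_id)
    # Any other hypervisor that is up and enabled is a valid target.
    destination_host = self.get_host_other_than(server_id)
    self.admin_servers_client.live_migrate_server(
        server_id, host=destination_host, block_migration='auto')
    waiters.wait_for_server_status(self.servers_client, server_id, 'ACTIVE')
    self.assertNotEqual(source_host, self.get_host_for_server(server_id))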
diff --git a/tempest/api/compute/admin/test_live_block_migration_negative.py b/tempest/api/compute/admin/test_live_migration_negative.py
similarity index 72%
rename from tempest/api/compute/admin/test_live_block_migration_negative.py
rename to tempest/api/compute/admin/test_live_migration_negative.py
index ab63154..deabbc2 100644
--- a/tempest/api/compute/admin/test_live_block_migration_negative.py
+++ b/tempest/api/compute/admin/test_live_migration_negative.py
@@ -23,10 +23,10 @@
CONF = config.CONF
-class LiveBlockMigrationNegativeTestJSON(base.BaseV2ComputeAdminTest):
+class LiveMigrationNegativeTest(base.BaseV2ComputeAdminTest):
@classmethod
def skip_checks(cls):
- super(LiveBlockMigrationNegativeTestJSON, cls).skip_checks()
+ super(LiveMigrationNegativeTest, cls).skip_checks()
if not CONF.compute_feature_enabled.live_migration:
raise cls.skipException("Live migration is not enabled")
@@ -47,3 +47,17 @@
server['id'], target_host)
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('6e2f94f5-2ee8-4830-bef5-5bc95bb0795b')
+ def test_live_block_migration_suspended(self):
+ server = self.create_test_server(wait_until="ACTIVE")
+
+ self.admin_servers_client.suspend_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'SUSPENDED')
+
+ destination_host = self.get_host_other_than(server['id'])
+
+ self.assertRaises(lib_exc.Conflict, self._migrate_server_to,
+ server['id'], destination_host)
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 937540e..c2bdf7e 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -17,6 +17,7 @@
from testtools import matchers
from tempest.api.compute import base
+from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -93,10 +94,11 @@
# Verify that GET shows the updated quota set of project
project_name = data_utils.rand_name('cpu_quota_project')
project_desc = project_name + '-desc'
- project = self.identity_utils.create_project(name=project_name,
- description=project_desc)
+ project = identity.identity_utils(self.os_admin).create_project(
+ name=project_name, description=project_desc)
project_id = project['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
self.adm_client.update_quota_set(project_id, ram='5120')
quota_set = self.adm_client.show_quota_set(project_id)['quota_set']
@@ -106,12 +108,12 @@
user_name = data_utils.rand_name('cpu_quota_user')
password = data_utils.rand_password()
email = user_name + '@testmail.tm'
- user = self.identity_utils.create_user(username=user_name,
- password=password,
- project=project,
- email=email)
+ user = identity.identity_utils(self.os_admin).create_user(
+ username=user_name, password=password, project=project,
+ email=email)
user_id = user['id']
- self.addCleanup(self.identity_utils.delete_user, user_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_user,
+ user_id)
self.adm_client.update_quota_set(project_id,
user_id=user_id,
@@ -125,10 +127,11 @@
# Admin can delete the resource quota set for a project
project_name = data_utils.rand_name('ram_quota_project')
project_desc = project_name + '-desc'
- project = self.identity_utils.create_project(name=project_name,
- description=project_desc)
+ project = identity.identity_utils(self.os_admin).create_project(
+ name=project_name, description=project_desc)
project_id = project['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
quota_set_default = (self.adm_client.show_quota_set(project_id)
['quota_set'])
ram_default = quota_set_default['ram']
@@ -157,8 +160,7 @@
def _restore_default_quotas(self, original_defaults):
LOG.debug("restoring quota class defaults")
- self.adm_client.update_quota_class_set(
- 'default', **original_defaults)['quota_class_set']
+ self.adm_client.update_quota_class_set('default', **original_defaults)
# NOTE(sdague): this test is problematic as it changes
# global state, and possibly needs to be part of a set of
@@ -169,7 +171,6 @@
LOG.debug("get the current 'default' quota class values")
body = (self.adm_client.show_quota_class_set('default')
['quota_class_set'])
- self.assertIn('id', body)
self.assertEqual('default', body.pop('id'))
# restore the defaults when the test is done
self.addCleanup(self._restore_default_quotas, body.copy())
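The quota tests above replace calls through self.identity_utils with the identity.identity_utils(self.os_admin) factory. A hedged sketch of the create-and-clean-up pattern it supports (the project name is illustrative; the calls are the ones used in the hunks above):

from tempest.common import identity
from tempest.lib.common.utils import data_utils

def _create_quota_project(self):
    admin_identity = identity.identity_utils(self.os_admin)
    name = data_utils.rand_name('quota_project')
    project = admin_identity.create_project(name=name,
                                            description=name + '-desc')
    # Use the same helper instance for the cleanup call.
    self.addCleanup(admin_identity.delete_project, project['id'])
    return project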
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 747f320..5ef7ee4 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -13,11 +13,11 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -89,7 +89,7 @@
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7c6c8f3b-2bf6-4918-b240-57b136a66aa0')
- @test.services('network')
+ @utils.services('network')
def test_security_groups_exceed_limit(self):
# Negative test: Creation Security Groups over limit should FAIL
# Set the quota to number of used security groups
@@ -108,7 +108,7 @@
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6e9f436d-f1ed-4f8e-a493-7275dfaa4b4d')
- @test.services('network')
+ @utils.services('network')
def test_security_groups_rules_exceed_limit(self):
# Negative test: Creation of Security Group Rules should FAIL
# when we reach limit maxSecurityGroupRules
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index 8abe03a..ff9caa3 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
@@ -34,7 +34,7 @@
self.client.delete_security_group(securitygroup_id)
@decorators.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
- @test.services('network')
+ @utils.services('network')
def test_list_security_groups_list_all_tenants_filter(self):
# Admin can list security groups of all tenants
# List of all security groups created
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 0521cca..3f06c4e 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -13,8 +13,6 @@
# under the License.
from tempest.api.compute import base
-from tempest.common import compute
-from tempest.common import fixed_network
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -126,26 +124,17 @@
@decorators.idempotent_id('86c7a8f7-50cf-43a9-9bac-5b985317134f')
def test_list_servers_filter_by_exist_host(self):
# Filter the list of servers by existent host
- name = data_utils.rand_name(self.__class__.__name__ + '-server')
- network = self.get_tenant_network()
- network_kwargs = fixed_network.set_networks_kwarg(network)
- # We need to create the server as an admin, so we can't use
- # self.create_test_server() here as this method creates the server
- # in the "primary" (i.e non-admin) tenant.
- test_server, _ = compute.create_test_server(
- self.os_admin, wait_until="ACTIVE", name=name, **network_kwargs)
- self.addCleanup(self.client.delete_server, test_server['id'])
- server = self.client.show_server(test_server['id'])['server']
- self.assertEqual(server['status'], 'ACTIVE')
+ server = self.client.show_server(self.s1_id)['server']
hostname = server['OS-EXT-SRV-ATTR:host']
- params = {'host': hostname}
- body = self.client.list_servers(**params)
- servers = body['servers']
- nonexistent_params = {'host': 'nonexistent_host'}
+ params = {'host': hostname, 'all_tenants': '1'}
+ servers = self.client.list_servers(**params)['servers']
+ self.assertIn(server['id'], map(lambda x: x['id'], servers))
+
+ nonexistent_params = {'host': 'nonexistent_host',
+ 'all_tenants': '1'}
nonexistent_body = self.client.list_servers(**nonexistent_params)
nonexistent_servers = nonexistent_body['servers']
- self.assertIn(test_server['id'], map(lambda x: x['id'], servers))
- self.assertNotIn(test_server['id'],
+ self.assertNotIn(server['id'],
map(lambda x: x['id'], nonexistent_servers))
@decorators.idempotent_id('ee8ae470-db70-474d-b752-690b7892cab1')
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 9023759..f720b84 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -32,7 +32,6 @@
def setup_clients(cls):
super(ServersAdminNegativeTestJSON, cls).setup_clients()
cls.client = cls.os_admin.servers_client
- cls.non_adm_client = cls.servers_client
cls.quotas_client = cls.os_admin.quotas_client
@classmethod
@@ -62,7 +61,7 @@
flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.client.resize_server,
- self.servers[0]['id'],
+ self.s1_id,
flavor_ref['id'])
@decorators.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
@@ -84,7 +83,7 @@
flavor_ref = self.create_flavor(ram=ram, vcpus=vcpus, disk=disk)
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.client.resize_server,
- self.servers[0]['id'],
+ self.s1_id,
flavor_ref['id'])
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 858998a..2e7b07b 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -15,9 +15,9 @@
import testtools
from tempest.api.compute import base
+from tempest.common import compute
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -45,7 +45,7 @@
@decorators.idempotent_id('26a9d5df-6890-45f2-abc4-a659290cb130')
@testtools.skipUnless(
- test.is_scheduler_filter_enabled("SameHostFilter"),
+ compute.is_scheduler_filter_enabled("SameHostFilter"),
'SameHostFilter is not available.')
def test_create_servers_on_same_host(self):
hints = {'same_host': self.server01}
@@ -56,7 +56,7 @@
@decorators.idempotent_id('cc7ca884-6e3e-42a3-a92f-c522fcf25e8e')
@testtools.skipUnless(
- test.is_scheduler_filter_enabled("DifferentHostFilter"),
+ compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts(self):
hints = {'different_host': self.server01}
@@ -67,7 +67,7 @@
@decorators.idempotent_id('7869cc84-d661-4e14-9f00-c18cdc89cf57')
@testtools.skipUnless(
- test.is_scheduler_filter_enabled("DifferentHostFilter"),
+ compute.is_scheduler_filter_enabled("DifferentHostFilter"),
'DifferentHostFilter is not available.')
def test_create_servers_on_different_hosts_with_list_of_servers(self):
# This scheduler-hint supports list of servers also.
@@ -76,3 +76,38 @@
wait_until='ACTIVE')['id']
host02 = self._get_host(server02)
self.assertNotEqual(self.host01, host02)
+
+ @decorators.idempotent_id('f8bd0867-e459-45f5-ba53-59134552fe04')
+ @testtools.skipUnless(
+ compute.is_scheduler_filter_enabled("ServerGroupAntiAffinityFilter"),
+ 'ServerGroupAntiAffinityFilter is not available.')
+ def test_create_server_with_scheduler_hint_group_anti_affinity(self):
+ """Tests the ServerGroupAntiAffinityFilter
+
+ Creates two servers in an anti-affinity server group and
+ asserts the servers are in the group and on different hosts.
+ """
+ group_id = self.create_test_server_group(
+ policy=['anti-affinity'])['id']
+ hints = {'group': group_id}
+ reservation_id = self.create_test_server(
+ scheduler_hints=hints, wait_until='ACTIVE', min_count=2,
+ return_reservation_id=True)['reservation_id']
+
+ # Get the servers using the reservation_id.
+ servers = self.servers_client.list_servers(
+ detail=True, reservation_id=reservation_id)['servers']
+ self.assertEqual(2, len(servers))
+
+ # Assert the servers are in the group.
+ server_group = self.server_groups_client.show_server_group(
+ group_id)['server_group']
+ hosts = {}
+ for server in servers:
+ self.assertIn(server['id'], server_group['members'])
+ hosts[server['id']] = self._get_host(server['id'])
+
+ # Assert the servers are on different hosts.
+ hostnames = list(hosts.values())
+ self.assertNotEqual(hostnames[0], hostnames[1],
+ 'Servers are on the same host: %s' % hosts)
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 22a5bc4..d715a42 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -13,11 +13,11 @@
import time
from tempest.api.compute import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -80,7 +80,7 @@
raise lib_exc.TimeoutException(message)
@decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
- @test.services('volume')
+ @utils.services('volume')
def test_volume_swap(self):
# Create two volumes.
# NOTE(gmann): Volumes are created before server creation so that
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 429ded5..9ee8858 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -97,8 +97,8 @@
cls.security_group_default_rules_client = (
cls.os_primary.security_group_default_rules_client)
cls.versions_client = cls.os_primary.compute_versions_client
-
- cls.volumes_client = cls.os_primary.volumes_v2_client
+ if CONF.service_available.cinder:
+ cls.volumes_client = cls.os_primary.volumes_client_latest
@classmethod
def resource_setup(cls):
@@ -116,42 +116,6 @@
cls.ssh_user = CONF.validation.image_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
- cls.servers = []
- cls.images = []
- cls.security_groups = []
- cls.server_groups = []
- cls.volumes = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.clear_resources('images', cls.images,
- cls.compute_images_client.delete_image)
- cls.clear_servers()
- cls.clear_resources('security groups', cls.security_groups,
- cls.security_groups_client.delete_security_group)
- cls.clear_resources('server groups', cls.server_groups,
- cls.server_groups_client.delete_server_group)
- cls.clear_volumes()
- super(BaseV2ComputeTest, cls).resource_cleanup()
-
- @classmethod
- def clear_servers(cls):
- LOG.debug('Clearing servers: %s', ','.join(
- server['id'] for server in cls.servers))
- for server in cls.servers:
- try:
- test_utils.call_and_ignore_notfound_exc(
- cls.servers_client.delete_server, server['id'])
- except Exception:
- LOG.exception('Deleting server %s failed', server['id'])
-
- for server in cls.servers:
- try:
- waiters.wait_for_server_termination(cls.servers_client,
- server['id'])
- except Exception:
- LOG.exception('Waiting for deletion of server %s failed',
- server['id'])
@classmethod
def server_check_teardown(cls):
@@ -190,7 +154,7 @@
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
- **kwargs):
+ validation_resources=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
@@ -200,6 +164,10 @@
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
+ :param validation_resources: Dictionary of validation resources as
+ returned by `get_class_validation_resources`.
+ :param kwargs: Extra arguments are passed down to the
+ `compute.create_test_server` call.
"""
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
@@ -216,12 +184,20 @@
body, servers = compute.create_test_server(
cls.os_primary,
validatable,
- validation_resources=cls.validation_resources,
+ validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
- cls.servers.extend(servers)
 + # Schedule a wait and a delete cleanup for each server. Cleanups run
 + # in LIFO order, so all servers are deleted first and then waited on.
+ for server in servers:
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client, server['id'])
+ for server in servers:
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.servers_client.delete_server, server['id'])
return body
@@ -233,7 +209,10 @@
description = data_utils.rand_name('description')
body = cls.security_groups_client.create_security_group(
name=name, description=description)['security_group']
- cls.security_groups.append(body['id'])
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.security_groups_client.delete_security_group,
+ body['id'])
return body
@@ -245,7 +224,10 @@
policy = ['affinity']
body = cls.server_groups_client.create_server_group(
name=name, policies=policy)['server_group']
- cls.server_groups.append(body['id'])
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.server_groups_client.delete_server_group,
+ body['id'])
return body
def wait_for(self, condition):
@@ -263,18 +245,6 @@
return
time.sleep(self.build_interval)
- @staticmethod
- def _delete_volume(volumes_client, volume_id):
- """Deletes the given volume and waits for it to be gone."""
- try:
- volumes_client.delete_volume(volume_id)
- # TODO(mriedem): We should move the wait_for_resource_deletion
- # into the delete_volume method as a convenience to the caller.
- volumes_client.wait_for_resource_deletion(volume_id)
- except lib_exc.NotFound:
- LOG.warning("Unable to delete volume '%s' since it was not found. "
- "Maybe it was already deleted?", volume_id)
-
@classmethod
def prepare_instance_network(cls):
if (CONF.validation.auth_method != 'disabled' and
@@ -292,8 +262,14 @@
image = cls.compute_images_client.create_image(server_id, name=name,
**kwargs)
- image_id = data_utils.parse_image_id(image.response['location'])
- cls.images.append(image_id)
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.compute_images_client.delete_image,
+ image_id)
if wait_until is not None:
try:
@@ -325,14 +301,34 @@
return image
@classmethod
- def rebuild_server(cls, server_id, validatable=False, **kwargs):
- # Destroy an existing server and creates a new one
+ def recreate_server(cls, server_id, validatable=False, **kwargs):
+ """Destroy an existing class level server and creates a new one
+
+ Some test classes use a test server that can be used by multiple
+ tests. This is done to optimise runtime and test load.
+ If something goes wrong with the test server, it can be rebuilt
+ using this helper.
+
+ This helper can also be used for the initial provisioning if no
+ server_id is specified.
+
+ :param server_id: UUID of the server to be rebuilt. If None is
+ specified, a new server is provisioned.
 + :param validatable: whether the server needs to be
+ validatable. When True, validation resources are acquired via
+ the `get_class_validation_resources` helper.
 + :param kwargs: extra parameters are passed through to the
+ `create_test_server` call.
+ :return: the UUID of the created server.
+ """
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
+ validation_resources=cls.get_class_validation_resources(
+ cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
@@ -360,17 +356,33 @@
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_client, volume_id)
+ try:
+ cls.volumes_client.delete_volume(volume_id)
+ # TODO(mriedem): We should move the wait_for_resource_deletion
+ # into the delete_volume method as a convenience to the caller.
+ cls.volumes_client.wait_for_resource_deletion(volume_id)
+ except lib_exc.NotFound:
+ LOG.warning("Unable to delete volume '%s' since it was not found. "
+ "Maybe it was already deleted?", volume_id)
@classmethod
- def get_server_ip(cls, server):
+ def get_server_ip(cls, server, validation_resources=None):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
+
+ :param server: The server dict as returned by the API
+ :param validation_resources: The dict of validation resources
+ provisioned for the server.
"""
if CONF.validation.connect_method == 'floating':
- return cls.validation_resources['floating_ip']['ip']
+ if validation_resources:
+ return validation_resources['floating_ip']['ip']
+ else:
+ msg = ('When validation.connect_method equals floating, '
+ 'validation_resources cannot be None')
+ raise exceptions.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
@@ -401,30 +413,33 @@
if image_ref is not None:
kwargs['imageRef'] = image_ref
volume = cls.volumes_client.create_volume(**kwargs)['volume']
- cls.volumes.append(volume)
+ cls.addClassResourceCleanup(
+ cls.volumes_client.wait_for_resource_deletion, volume['id'])
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.volumes_client.delete_volume,
+ volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], 'available')
return volume
- @classmethod
- def clear_volumes(cls):
- LOG.debug('Clearing volumes: %s', ','.join(
- volume['id'] for volume in cls.volumes))
- for volume in cls.volumes:
- try:
- test_utils.call_and_ignore_notfound_exc(
- cls.volumes_client.delete_volume, volume['id'])
- except Exception:
- LOG.exception('Deleting volume %s failed', volume['id'])
+ def _detach_volume(self, server, volume):
+ """Helper method to detach a volume.
- for volume in cls.volumes:
- try:
- cls.volumes_client.wait_for_resource_deletion(volume['id'])
- except Exception:
- LOG.exception('Waiting for deletion of volume %s failed',
- volume['id'])
 + Ignores 404 responses if the volume or server does not exist, or the
+ volume is already detached from the server.
+ """
+ try:
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ # Check the status. You can only detach an in-use volume, otherwise
+ # the compute API will return a 400 response.
+ if volume['status'] == 'in-use':
+ self.servers_client.detach_volume(server['id'], volume['id'])
+ except lib_exc.NotFound:
+ # Ignore 404s on detach in case the server is deleted or the volume
+ # is already detached.
+ pass
- def attach_volume(self, server, volume, device=None):
+ def attach_volume(self, server, volume, device=None, check_reserved=False):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -433,10 +448,15 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
+ :param check_reserved: Consider a status of reserved as valid for
 + completion. This handles the new Cinder attach flow, where
 + 'reserved' more accurately describes cases such as attaching a
 + volume to a shelved server.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
+
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
@@ -446,11 +466,12 @@
self.volumes_client, volume['id'], 'available')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.servers_client.detach_volume,
- server['id'], volume['id'])
+ self.addCleanup(self._detach_volume, server, volume)
+ statuses = ['in-use']
+ if check_reserved:
+ statuses.append('reserved')
waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'in-use')
+ volume['id'], statuses)
return attachment
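check_reserved exists because newer Cinder attach flows report 'reserved' rather than 'in-use' when the target server has no host, for example when it is shelved. A hedged sketch of a caller opting in (shelve behaviour and the resulting status depend on deployment settings such as shelved_offload_time; the test name is illustrative):

def test_attach_volume_to_shelved_server_sketch(self):
    server = self.create_test_server(wait_until='ACTIVE')
    volume = self.create_volume()
    self.servers_client.shelve_server(server['id'])
    waiters.wait_for_server_status(self.servers_client, server['id'],
                                   'SHELVED_OFFLOADED')
    # With no host bound to the server, the attachment may stop at
    # 'reserved'; accept that as a successful outcome.
    self.attach_volume(server, volume, check_reserved=True)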
@@ -479,3 +500,19 @@
self.addCleanup(client.wait_for_resource_deletion, flavor['id'])
self.addCleanup(client.delete_flavor, flavor['id'])
return flavor
+
+ def get_host_for_server(self, server_id):
+ server_details = self.admin_servers_client.show_server(server_id)
+ return server_details['server']['OS-EXT-SRV-ATTR:host']
+
+ def get_host_other_than(self, server_id):
+ source_host = self.get_host_for_server(server_id)
+
+ hypers = self.os_admin.hypervisor_client.list_hypervisors(
+ )['hypervisors']
+ hosts = [hyper['hypervisor_hostname'] for hyper in hypers
+ if hyper['state'] == 'up' and hyper['status'] == 'enabled']
+
+ for target_host in hosts:
+ if source_host != target_host:
+ return target_host
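get_server_ip now receives the validation resources explicitly rather than reading a class attribute. A hedged sketch of a validatable server flow built on the class-scoped helper referenced in the diff (the test class name is illustrative):

@classmethod
def resource_setup(cls):
    super(ValidatableServerSketch, cls).resource_setup()
    # Acquire (or reuse) the class-scoped validation resources and pass
    # them to both server creation and the IP lookup.
    validation_resources = cls.get_class_validation_resources(cls.os_primary)
    cls.server = cls.create_test_server(
        validatable=True,
        validation_resources=validation_resources,
        wait_until='ACTIVE')
    cls.server_ip = cls.get_server_ip(
        cls.server, validation_resources=validation_resources)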
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index a39fec9..0e6c016 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -31,14 +31,9 @@
@decorators.idempotent_id('c070a441-b08e-447e-a733-905909535b1b')
def test_create_root_certificate(self):
# create certificates
- body = self.certificates_client.create_certificate()['certificate']
- self.assertIn('data', body)
- self.assertIn('private_key', body)
+ self.certificates_client.create_certificate()
@decorators.idempotent_id('3ac273d0-92d2-4632-bdfc-afbc21d4606c')
def test_get_root_certificate(self):
# get the root certificate
- body = (self.certificates_client.show_certificate('root')
- ['certificate'])
- self.assertIn('data', body)
- self.assertIn('private_key', body)
+ self.certificates_client.show_certificate('root')
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index d5bb45a..20294e9 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -18,8 +18,6 @@
class FlavorsV2TestJSON(base.BaseV2ComputeTest):
- _min_disk = 'minDisk'
- _min_ram = 'minRam'
@decorators.attr(type='smoke')
@decorators.idempotent_id('e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59')
@@ -89,7 +87,7 @@
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
flavor_id = flavor['id']
- params = {self._min_disk: flavor['disk'] + 1}
+ params = {'minDisk': flavor['disk'] + 1}
flavors = self.flavors_client.list_flavors(detail=True,
**params)['flavors']
self.assertEmpty([i for i in flavors if i['id'] == flavor_id])
@@ -100,7 +98,7 @@
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
flavor_id = flavor['id']
- params = {self._min_ram: flavor['ram'] + 1}
+ params = {'minRam': flavor['ram'] + 1}
flavors = self.flavors_client.list_flavors(detail=True,
**params)['flavors']
self.assertEmpty([i for i in flavors if i['id'] == flavor_id])
@@ -111,7 +109,7 @@
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
flavor_id = flavor['id']
- params = {self._min_disk: flavor['disk'] + 1}
+ params = {'minDisk': flavor['disk'] + 1}
flavors = self.flavors_client.list_flavors(**params)['flavors']
self.assertEmpty([i for i in flavors if i['id'] == flavor_id])
@@ -121,6 +119,6 @@
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
flavor_id = flavor['id']
- params = {self._min_ram: flavor['ram'] + 1}
+ params = {'minRam': flavor['ram'] + 1}
flavors = self.flavors_client.list_flavors(**params)['flavors']
self.assertEmpty([i for i in flavors if i['id'] == flavor_id])
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index ebb9d2e..efd4f0e 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -19,11 +19,11 @@
from tempest.api.compute import base
from tempest.common import image as common_image
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -43,7 +43,7 @@
'[image-feature-enabled].')
@decorators.attr(type=['negative'])
- @test.services('image')
+ @utils.services('image')
@decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
def test_boot_with_low_ram(self):
"""Try boot a vm with lower than min ram
diff --git a/tempest/api/compute/floating_ips/base.py b/tempest/api/compute/floating_ips/base.py
index 142eaec..262a3c1 100644
--- a/tempest/api/compute/floating_ips/base.py
+++ b/tempest/api/compute/floating_ips/base.py
@@ -14,6 +14,10 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
+from tempest import config
+
+CONF = config.CONF
class BaseFloatingIPsTest(base.BaseV2ComputeTest):
@@ -24,3 +28,17 @@
cls.set_network_resources(network=True, subnet=True,
router=True, dhcp=True)
super(BaseFloatingIPsTest, cls).setup_credentials()
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseFloatingIPsTest, cls).skip_checks()
+ if not utils.get_service_list()['network']:
+ raise cls.skipException("network service not enabled.")
+ if not CONF.network_feature_enabled.floating_ips:
+ raise cls.skipException("Floating ips are not available")
+
+ @classmethod
+ def setup_clients(cls):
+ super(BaseFloatingIPsTest, cls).setup_clients()
+ cls.client = cls.floating_ips_client
+ cls.pools_client = cls.floating_ip_pools_client
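With the skip checks and client aliases consolidated in this base class, the concrete floating IP test classes below mostly just pin a microversion cap, since the nova-network style floating IP APIs are not available past microversion 2.35. A hedged sketch of such a subclass (the class name, test name and idempotent id are placeholders, not part of the patch):

from tempest.api.compute.floating_ips import base
from tempest.lib import decorators


class FloatingIPsSketchTest(base.BaseFloatingIPsTest):

    # Same cap the patch applies to the pool/list tests below.
    max_microversion = '2.35'

    @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
    def test_list_floating_ips_sketch(self):
        floating_ips = self.client.list_floating_ips()['floating_ips']
        self.assertIsInstance(floating_ips, list)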
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index faa7b5d..2adc482 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -20,49 +20,15 @@
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
- server_id = None
- floating_ip = None
- @classmethod
- def skip_checks(cls):
- super(FloatingIPsTestJSON, cls).skip_checks()
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPsTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
-
- @classmethod
- def resource_setup(cls):
- super(FloatingIPsTestJSON, cls).resource_setup()
- cls.floating_ip_id = None
-
- # Server creation
- server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- # Floating IP creation
- body = cls.client.create_floating_ip(
- pool=CONF.network.floating_network_name)['floating_ip']
- cls.floating_ip_id = body['id']
- cls.floating_ip = body['ip']
-
- @classmethod
- def resource_cleanup(cls):
- # Deleting the floating IP which is created in this method
- if cls.floating_ip_id:
- cls.client.delete_floating_ip(cls.floating_ip_id)
- super(FloatingIPsTestJSON, cls).resource_cleanup()
+ max_microversion = '2.35'
@decorators.idempotent_id('f7bfb946-297e-41b8-9e8c-aba8e9bb5194')
- @test.services('network')
def test_allocate_floating_ip(self):
# Positive test:Allocation of a new floating IP to a project
# should be successful
@@ -78,7 +44,6 @@
self.assertIn(floating_ip_details, body)
@decorators.idempotent_id('de45e989-b5ca-4a9b-916b-04a52e7bbb8b')
- @test.services('network')
def test_delete_floating_ip(self):
# Positive test:Deletion of valid floating IP from project
# should be successful
@@ -92,8 +57,26 @@
# Check it was really deleted.
self.client.wait_for_resource_deletion(floating_ip_body['id'])
+
+class FloatingIPsAssociationTestJSON(base.BaseFloatingIPsTest):
+
+ max_microversion = '2.43'
+
+ @classmethod
+ def resource_setup(cls):
+ super(FloatingIPsAssociationTestJSON, cls).resource_setup()
+
+ # Server creation
+ cls.server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = cls.server['id']
+ # Floating IP creation
+ body = cls.client.create_floating_ip(
+ pool=CONF.network.floating_network_name)['floating_ip']
+ cls.addClassResourceCleanup(cls.client.delete_floating_ip, body['id'])
+ cls.floating_ip_id = body['id']
+ cls.floating_ip = body['ip']
+
@decorators.idempotent_id('307efa27-dc6f-48a0-8cd2-162ce3ef0b52')
- @test.services('network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_associate_disassociate_floating_ip(self):
@@ -116,7 +99,6 @@
self.server_id)
@decorators.idempotent_id('6edef4b2-aaf1-4abc-bbe3-993e2561e0fe')
- @test.services('network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_associate_already_associated_floating_ip(self):
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
index 483bd95..9257458 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
@@ -20,31 +20,18 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
class FloatingIPsNegativeTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPsNegativeTestJSON, cls).skip_checks()
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPsNegativeTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
+ max_microversion = '2.35'
@classmethod
def resource_setup(cls):
super(FloatingIPsNegativeTestJSON, cls).resource_setup()
- # Server creation
- server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
# Generating a nonexistent floatingIP id
body = cls.client.list_floating_ips()['floating_ips']
floating_ip_ids = [floating_ip['id'] for floating_ip in body]
@@ -58,7 +45,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6e0f059b-e4dd-48fb-8207-06e3bba5b074')
- @test.services('network')
def test_allocate_floating_ip_from_nonexistent_pool(self):
# Negative test:Allocation of a new floating IP from a nonexistent_pool
# to a project should fail
@@ -68,7 +54,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ae1c55a8-552b-44d4-bfb6-2a115a15d0ba')
- @test.services('network')
def test_delete_nonexistent_floating_ip(self):
# Negative test:Deletion of a nonexistent floating IP
# from project should fail
@@ -77,9 +62,19 @@
self.assertRaises(lib_exc.NotFound, self.client.delete_floating_ip,
self.non_exist_id)
+
+class FloatingIPsAssociationNegativeTestJSON(base.BaseFloatingIPsTest):
+
+ max_microversion = '2.43'
+
+ @classmethod
+ def resource_setup(cls):
+ super(FloatingIPsAssociationNegativeTestJSON, cls).resource_setup()
+ cls.server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = cls.server['id']
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('595fa616-1a71-4670-9614-46564ac49a4c')
- @test.services('network')
def test_associate_nonexistent_floating_ip(self):
# Negative test:Association of a non existent floating IP
# to specific server should fail
@@ -90,7 +85,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0a081a66-e568-4e6b-aa62-9587a876dca8')
- @test.services('network')
def test_dissociate_nonexistent_floating_ip(self):
# Negative test:Dissociation of a non existent floating IP should fail
# Dissociating non existent floating IP
@@ -100,7 +94,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('804b4fcb-bbf5-412f-925d-896672b61eb3')
- @test.services('network')
def test_associate_ip_to_server_without_passing_floating_ip(self):
# Negative test:Association of empty floating IP to specific server
# should raise NotFound or BadRequest(In case of Nova V2.1) exception.
@@ -110,7 +103,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('58a80596-ffb2-11e6-9393-fa163e4fa634')
- @test.services('network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_associate_ip_to_server_with_floating_ip(self):
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 913b992..944f798 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -13,47 +13,29 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
+from tempest.api.compute.floating_ips import base
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
-class FloatingIPDetailsTestJSON(base.BaseV2ComputeTest):
+class FloatingIPDetailsTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPDetailsTestJSON, cls).skip_checks()
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPDetailsTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
- cls.pools_client = cls.floating_ip_pools_client
+ max_microversion = '2.35'
@classmethod
def resource_setup(cls):
super(FloatingIPDetailsTestJSON, cls).resource_setup()
cls.floating_ip = []
- cls.floating_ip_id = []
for _ in range(3):
body = cls.client.create_floating_ip(
pool=CONF.network.floating_network_name)['floating_ip']
+ cls.addClassResourceCleanup(cls.client.delete_floating_ip,
+ body['id'])
cls.floating_ip.append(body)
- cls.floating_ip_id.append(body['id'])
-
- @classmethod
- def resource_cleanup(cls):
- for f_id in cls.floating_ip_id:
- cls.client.delete_floating_ip(f_id)
- super(FloatingIPDetailsTestJSON, cls).resource_cleanup()
@decorators.idempotent_id('16db31c3-fb85-40c9-bbe2-8cf7b67ff99f')
- @test.services('network')
def test_list_floating_ips(self):
# Positive test:Should return the list of floating IPs
body = self.client.list_floating_ips()['floating_ips']
@@ -64,7 +46,6 @@
self.assertIn(self.floating_ip[i], floating_ips)
@decorators.idempotent_id('eef497e0-8ff7-43c8-85ef-558440574f84')
- @test.services('network')
def test_get_floating_ip_details(self):
# Positive test:Should be able to GET the details of floatingIP
# Creating a floating IP for which details are to be checked
@@ -86,7 +67,6 @@
self.assertEqual(floating_ip_id, body['id'])
@decorators.idempotent_id('df389fc8-56f5-43cc-b290-20eda39854d3')
- @test.services('network')
def test_list_floating_ip_pools(self):
# Positive test:Should return the list of floating IP Pools
floating_ip_pools = self.pools_client.list_floating_ip_pools()
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
index b5bbb8c..d69248c 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
@@ -13,32 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
+from tempest.api.compute.floating_ips import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
-class FloatingIPDetailsNegativeTestJSON(base.BaseV2ComputeTest):
+class FloatingIPDetailsNegativeTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPDetailsNegativeTestJSON, cls).skip_checks()
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPDetailsNegativeTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
+ max_microversion = '2.35'
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7ab18834-4a4b-4f28-a2c5-440579866695')
- @test.services('network')
def test_get_nonexistent_floating_ip_details(self):
# Negative test:Should not be able to GET the details
# of non-existent floating IP
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 8d503dc..b497626 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -70,7 +71,9 @@
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
cls.image_id = body['id']
- cls.images.append(cls.image_id)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.glance_client.delete_image,
+ cls.image_id)
image_file = six.BytesIO((b'*' * 1024))
if CONF.image_feature_enabled.api_v1:
cls.glance_client.update_image(cls.image_id, data=image_file)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 5987d39..058e7e6 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -15,6 +15,7 @@
from tempest.api.compute import base
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -74,7 +75,6 @@
# Verify the image was deleted correctly
self.client.delete_image(image['id'])
- self.images.remove(image['id'])
self.client.wait_for_resource_deletion(image['id'])
@decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
@@ -87,5 +87,9 @@
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
body = self.client.create_image(self.server_id, name=utf8_name)
- image_id = data_utils.parse_image_id(body.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
+ image_id = body['image_id']
+ else:
+ image_id = data_utils.parse_image_id(body.response['location'])
self.addCleanup(self.client.delete_image, image_id)
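The repeated `compare_version_header_to_response` branches exist because compute microversion 2.45 moved the id of the image created by the createImage/createBackup actions into the response body instead of only a Location header. A small helper capturing the repeated branch (a sketch; `extract_image_id` is illustrative and not part of the patch):

from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils


def extract_image_id(body):
    # Factors out the branch repeated throughout this patch: depending on
    # how the negotiated microversion compares to compute 2.45, read
    # 'image_id' from the response body or parse it out of the Location
    # response header.
    if api_version_utils.compare_version_header_to_response(
            "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
        return body['image_id']
    return data_utils.parse_image_id(body.response['location'])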
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index cf32ba3..a2e58c9 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -19,6 +19,7 @@
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -51,7 +52,7 @@
self._reset_server()
def _reset_server(self):
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
@classmethod
def skip_checks(cls):
@@ -105,9 +106,12 @@
self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.client.delete_image(image_id)
- self.images.remove(image_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
@@ -124,12 +128,15 @@
# Return an error while trying to delete an image that is still being created
image = self.create_image_from_server(self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.addCleanup(self._reset_server)
# Do not wait, attempt to delete the image, ensure it's successful
self.client.delete_image(image_id)
- self.images.remove(image_id)
self.assertRaises(lib_exc.NotFound,
self.client.show_image, image_id)
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index acc8b3e..d83d8df 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -23,6 +23,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -74,7 +75,10 @@
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
image_id = body['id']
- cls.images.append(image_id)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.compute_images_client.delete_image,
+ image_id)
# Wait 1 second between creation and upload to ensure a delta
# between created_at and updated_at.
time.sleep(1)
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 0b7a967..3a54d51 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -51,13 +51,10 @@
# Keypair should be created, verified and deleted
k_name = data_utils.rand_name('keypair')
keypair = self.create_keypair(k_name)
- private_key = keypair['private_key']
key_name = keypair['name']
self.assertEqual(key_name, k_name,
"The created keypair name is not equal "
"to the requested name")
- self.assertIsNotNone(private_key,
- "Field private_key is empty or not found.")
@decorators.idempotent_id('a4233d5d-52d8-47cc-9a25-e1864527e3df')
def test_get_keypair_detail(self):
@@ -65,14 +62,9 @@
k_name = data_utils.rand_name('keypair')
self.create_keypair(k_name)
keypair_detail = self.client.show_keypair(k_name)['keypair']
- self.assertIn('name', keypair_detail)
- self.assertIn('public_key', keypair_detail)
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
- public_key = keypair_detail['public_key']
- self.assertIsNotNone(public_key,
- "Field public_key is empty or not found.")
@decorators.idempotent_id('39c90c6a-304a-49dd-95ec-2366129def05')
def test_keypair_create_with_pub_key(self):
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index 6148e16..54a6da8 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest import test
CONF = config.CONF
@@ -24,6 +24,12 @@
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
@classmethod
+ def skip_checks(cls):
+ super(BaseSecurityGroupsTest, cls).skip_checks()
+ if not utils.get_service_list()['network']:
+ raise cls.skipException("network service not enabled.")
+
+ @classmethod
def setup_credentials(cls):
# A network and a subnet will be created for these tests
cls.set_network_resources(network=True, subnet=True)
@@ -32,7 +38,7 @@
@staticmethod
def generate_random_security_group_id():
if (CONF.service_available.neutron and
- test.is_extension_enabled('security-group', 'network')):
+ utils.is_extension_enabled('security-group', 'network')):
return data_utils.rand_uuid()
else:
return data_utils.rand_int_id(start=999)
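The dropped `@test.services('network')` decorators in the security group modules below are compensated by this single class-level guard; `tempest.common.utils` still provides a per-test decorator for cases such as the attach-interfaces tests later in the patch. A condensed restatement of the guard (sketch only):

from tempest.api.compute import base
from tempest.common import utils


class BaseSecurityGroupsTest(base.BaseV2ComputeTest):

    @classmethod
    def skip_checks(cls):
        # One check for the whole class instead of a @test.services
        # decorator on every test method: skip when the network service is
        # not reported as available by get_service_list().
        super(BaseSecurityGroupsTest, cls).skip_checks()
        if not utils.get_service_list()['network']:
            raise cls.skipException("network service not enabled.")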
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 124db0e..4c99ea6 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -15,7 +15,6 @@
from tempest.api.compute.security_groups import base
from tempest.lib import decorators
-from tempest import test
class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
@@ -55,7 +54,6 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('850795d7-d4d3-4e55-b527-a774c0123d3a')
- @test.services('network')
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
# should be successful
@@ -73,7 +71,6 @@
self._check_expected_response(rule)
@decorators.idempotent_id('7a01873e-3c38-4f30-80be-31a043cfe2fd')
- @test.services('network')
def test_security_group_rules_create_with_optional_cidr(self):
# Positive test: Creation of Security Group rule
# with optional argument cidr
@@ -96,7 +93,6 @@
self._check_expected_response(rule)
@decorators.idempotent_id('7f5d2899-7705-4d4b-8458-4505188ffab6')
- @test.services('network')
def test_security_group_rules_create_with_optional_group_id(self):
# Positive test: Creation of Security Group rule
# with optional argument group_id
@@ -125,7 +121,6 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('a6154130-5a55-4850-8be4-5e9e796dbf17')
- @test.services('network')
def test_security_group_rules_list(self):
# Positive test: Created Security Group rules should be
# in the list of all rules
@@ -163,7 +158,6 @@
self.assertNotEmpty([i for i in rules if i['id'] == rule2_id])
@decorators.idempotent_id('fc5c5acf-2091-43a6-a6ae-e42760e9ffaf')
- @test.services('network')
def test_security_group_rules_delete_when_peer_group_deleted(self):
# Positive test: the rule should be deleted when the peer group is deleted
# Creating a Security Group to add rules to it
@@ -178,7 +172,7 @@
ip_protocol=self.ip_protocol,
from_port=self.from_port,
to_port=self.to_port,
- group_id=sg2_id)['security_group_rule']
+ group_id=sg2_id)
# Delete group2
self.security_groups_client.delete_security_group(sg2_id)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules_negative.py b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
index 4efb8b7..8283aae 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules_negative.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
@@ -17,7 +17,6 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
@@ -29,7 +28,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1d507e98-7951-469b-82c3-23f1e6b8c254')
- @test.services('network')
def test_create_security_group_rule_with_non_existent_id(self):
# Negative test: Creation of Security Group rule should FAIL
# with a non-existent parent group id
@@ -46,7 +44,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('2244d7e4-adb7-4ecb-9930-2d77e123ce4f')
- @test.services('network')
def test_create_security_group_rule_with_invalid_id(self):
# Negative test: Creation of Security Group rule should FAIL
# with a parent group id which is not an integer
@@ -63,7 +60,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8bd56d02-3ffa-4d67-9933-b6b9a01d6089')
- @test.services('network')
def test_create_security_group_rule_duplicate(self):
# Negative test: Create Security Group rule duplicate should fail
# Creating a Security Group to add rule to it
@@ -88,7 +84,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('84c81249-9f6e-439c-9bbf-cbb0d2cddbdf')
- @test.services('network')
def test_create_security_group_rule_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
@@ -108,7 +103,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('12bbc875-1045-4f7a-be46-751277baedb9')
- @test.services('network')
def test_create_security_group_rule_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
@@ -127,7 +121,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ff88804d-144f-45d1-bf59-dd155838a43a')
- @test.services('network')
def test_create_security_group_rule_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid to_port
@@ -146,7 +139,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('00296fa9-0576-496a-ae15-fbab843189e0')
- @test.services('network')
def test_create_security_group_rule_with_invalid_port_range(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid port range.
@@ -165,7 +157,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('56fddcca-dbb8-4494-a0db-96e9f869527c')
- @test.services('network')
def test_delete_security_group_rule_with_non_existent_id(self):
# Negative test: Deletion of a Security Group rule should FAIL
# with a non-existent id
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 930a58e..62d5bea 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -18,7 +18,6 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class SecurityGroupsTestJSON(base.BaseSecurityGroupsTest):
@@ -30,7 +29,6 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('eb2b087d-633d-4d0d-a7bd-9e6ba35b32de')
- @test.services('network')
def test_security_groups_create_list_delete(self):
# Positive test: Should return the list of Security Groups
# Create 3 Security Groups
@@ -54,22 +52,19 @@
self.client.wait_for_resource_deletion(sg['id'])
# Now check if all the created Security Groups are deleted
fetched_list = self.client.list_security_groups()['security_groups']
- deleted_sgs = \
- [sg for sg in security_group_list if sg in fetched_list]
+ deleted_sgs = [sg for sg in security_group_list if sg in fetched_list]
self.assertFalse(deleted_sgs,
"Failed to delete Security Group %s "
"list" % ', '.join(m_group['name']
for m_group in deleted_sgs))
@decorators.idempotent_id('ecc0da4a-2117-48af-91af-993cca39a615')
- @test.services('network')
def test_security_group_create_get_delete(self):
# Security Group should be created, fetched and deleted
# with a space in the middle of the name as well as
# leading and trailing spaces
s_name = ' %s ' % data_utils.rand_name('securitygroup ')
securitygroup = self.create_security_group(name=s_name)
- self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
@@ -84,7 +79,6 @@
self.client.wait_for_resource_deletion(securitygroup['id'])
@decorators.idempotent_id('fe4abc0d-83f5-4c50-ad11-57a1127297a2')
- @test.services('network')
def test_server_security_groups(self):
# Checks that security groups may be added and linked to a server
# and not deleted if the server is active.
@@ -126,12 +120,10 @@
self.client.delete_security_group(sg2['id'])
@decorators.idempotent_id('7d4e1d3c-3209-4d6d-b020-986304ebad1f')
- @test.services('network')
def test_update_security_groups(self):
# Update security group name and description
# Create a security group
securitygroup = self.create_security_group()
- self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
# Update the name and description
s_new_name = data_utils.rand_name('sg-hth')
@@ -146,7 +138,6 @@
self.assertEqual(s_new_des, fetched_group['description'])
@decorators.idempotent_id('79517d60-535a-438f-af3d-e6feab1cbea7')
- @test.services('network')
def test_list_security_groups_by_server(self):
# Create a couple security groups that we will use
# for the server resource this test creates
diff --git a/tempest/api/compute/security_groups/test_security_groups_negative.py b/tempest/api/compute/security_groups/test_security_groups_negative.py
index 207778a..9c44bb2 100644
--- a/tempest/api/compute/security_groups/test_security_groups_negative.py
+++ b/tempest/api/compute/security_groups/test_security_groups_negative.py
@@ -20,7 +20,6 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -34,7 +33,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('673eaec1-9b3e-48ed-bdf1-2786c1b9661c')
- @test.services('network')
def test_security_group_get_nonexistent_group(self):
# Negative test: Should not be able to GET the details
# of a non-existent Security Group
@@ -46,7 +44,6 @@
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1759c3cb-b0fc-44b7-86ce-c99236be911d')
- @test.services('network')
def test_security_group_create_with_invalid_group_name(self):
# Negative test: Security Group should not be created with group name
# as an empty string, as only white space, or longer than 255 chars
@@ -69,7 +66,6 @@
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('777b6f14-aca9-4758-9e84-38783cfa58bc')
- @test.services('network')
def test_security_group_create_with_invalid_group_description(self):
# Negative test: Security Group should not be created with description
# longer than 255 chars. Empty description is allowed by the API
@@ -85,7 +81,6 @@
@testtools.skipIf(CONF.service_available.neutron,
"Neutron allows duplicate names for security groups")
@decorators.attr(type=['negative'])
- @test.services('network')
def test_security_group_create_with_duplicate_name(self):
# Negative test: Security Group with a duplicate name should not
# be created
@@ -99,7 +94,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('36a1629f-c6da-4a26-b8b8-55e7e5d5cd58')
- @test.services('network')
def test_delete_the_default_security_group(self):
# Negative test: Deletion of the "default" Security Group should fail
default_security_group_id = None
@@ -115,7 +109,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6727c00b-214c-4f9e-9a52-017ac3e98411')
- @test.services('network')
def test_delete_nonexistent_security_group(self):
# Negative test: Deletion of a non-existent Security Group should fail
non_exist_id = self.generate_random_security_group_id()
@@ -124,7 +117,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1438f330-8fa4-4aeb-8a94-37c250106d7f')
- @test.services('network')
def test_delete_security_group_without_passing_id(self):
# Negative test: Deletion of a Security Group without passing its ID
# should fail
@@ -135,7 +127,6 @@
@testtools.skipIf(CONF.service_available.neutron,
"Neutron does not check the security group ID")
@decorators.attr(type=['negative'])
- @test.services('network')
def test_update_security_group_with_invalid_sg_id(self):
# Update security_group with invalid sg_id should fail
s_name = data_utils.rand_name('sg')
@@ -150,11 +141,9 @@
@testtools.skipIf(CONF.service_available.neutron,
"Neutron does not check the security group name")
@decorators.attr(type=['negative'])
- @test.services('network')
def test_update_security_group_with_invalid_sg_name(self):
# Update security_group with invalid sg_name should fail
securitygroup = self.create_security_group()
- self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
# Update Security Group with group name longer than 255 chars
s_new_name = 'securitygroup-'.ljust(260, '0')
@@ -166,11 +155,9 @@
@testtools.skipIf(CONF.service_available.neutron,
"Neutron does not check the security group description")
@decorators.attr(type=['negative'])
- @test.services('network')
def test_update_security_group_with_invalid_sg_des(self):
# Update security_group with invalid sg_des should fail
securitygroup = self.create_security_group()
- self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
# Update Security Group with group description longer than 255 chars
s_new_des = 'des-'.ljust(260, '0')
@@ -180,7 +167,6 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('27edee9c-873d-4da6-a68a-3c256efebe8f')
- @test.services('network')
def test_update_non_existent_security_group(self):
# Update a non-existent Security Group should Fail
non_exist_id = self.generate_random_security_group_id()
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 65d5042..0e8f681 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -15,14 +15,16 @@
import time
+import six
+
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -76,8 +78,11 @@
return port
- def _check_interface(self, iface, port_id=None, network_id=None,
- fixed_ip=None, mac_addr=None):
+ def _check_interface(self, iface, server_id=None, port_id=None,
+ network_id=None, fixed_ip=None, mac_addr=None):
+ if server_id:
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server_id, iface['port_id'], 'ACTIVE')
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
@@ -101,16 +106,14 @@
['interfaceAttachment'])
iface = waiters.wait_for_interface_status(
self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface)
return iface
def _test_create_interface_by_network_id(self, server, ifs):
network_id = ifs[0]['net_id']
iface = self.interfaces_client.create_interface(
server['id'], net_id=network_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, network_id=network_id)
+ self._check_interface(iface, server_id=server['id'],
+ network_id=network_id)
return iface
def _test_create_interface_by_port_id(self, server, ifs):
@@ -120,9 +123,8 @@
self.addCleanup(self.ports_client.delete_port, port_id)
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'], port_id=port_id,
+ network_id=network_id)
return iface
def _test_create_interface_by_fixed_ips(self, server, ifs):
@@ -139,9 +141,8 @@
server['id'], net_id=network_id,
fixed_ips=fixed_ips)['interfaceAttachment']
self.addCleanup(self.ports_client.delete_port, iface['port_id'])
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, fixed_ip=ip_list[0])
+ self._check_interface(iface, server_id=server['id'],
+ fixed_ip=ip_list[0])
return iface
def _test_show_interface(self, server, ifs):
@@ -183,19 +184,18 @@
self.assertEqual(sorted(list1), sorted(list2))
@decorators.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
- @test.services('network')
+ @utils.services('network')
def test_create_list_show_delete_interfaces(self):
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- self._check_interface(ifs[0])
try:
iface = self._test_create_interface(server)
except lib_exc.BadRequest as e:
msg = ('Multiple possible networks found, use a Network ID to be '
'more specific.')
- if not CONF.compute.fixed_network_name and e.message == msg:
+ if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
raise
else:
ifs.append(iface)
@@ -220,13 +220,12 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('c7e0e60b-ee45-43d0-abeb-8596fd42a2f9')
- @test.services('network')
+ @utils.services('network')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- self._check_interface(ifs[0])
network_id = ifs[0]['net_id']
self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
# Remove the fixed IP from server.
@@ -243,7 +242,6 @@
break
self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
- @decorators.skip_because(bug='1607714')
@decorators.idempotent_id('2f3a0127-95c7-4977-92d2-bc5aec602fb4')
def test_reassign_port_between_servers(self):
"""Tests the following:
@@ -273,7 +271,8 @@
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'],
+ port_id=port_id)
# detach the port from the server; this is a cast in the compute
# API so we have to poll the port until the device_id is unset.
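After this refactor, each `_test_create_interface_by_*` helper delegates the ACTIVE wait to `_check_interface` by passing `server_id`, so every assertion runs against the refreshed attachment record. Condensed sketch of the method as changed here (body abbreviated to the part visible in the hunk):

from tempest.common import waiters


def _check_interface(self, iface, server_id=None, port_id=None,
                     network_id=None, fixed_ip=None, mac_addr=None):
    # With a server id, poll until the attachment reaches ACTIVE and use
    # the refreshed record for the field checks that follow.
    if server_id:
        iface = waiters.wait_for_interface_status(
            self.interfaces_client, server_id, iface['port_id'], 'ACTIVE')
    if port_id:
        self.assertEqual(iface['port_id'], port_id)
    if network_id:
        self.assertEqual(iface['net_id'], network_id)
    # ... remaining fixed_ip / mac_addr checks unchanged ...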
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index ffadd96..c660821 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -17,17 +17,18 @@
import testtools
from tempest.api.compute import base
+from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
disk_config = 'AUTO'
+ volume_backed = False
@classmethod
def setup_credentials(cls):
@@ -38,13 +39,12 @@
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- cls.networks_client = cls.os_primary.networks_client
- cls.subnets_client = cls.os_primary.subnets_client
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
+ validation_resources = cls.get_class_validation_resources(
+ cls.os_primary)
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
@@ -53,30 +53,18 @@
disk_config = cls.disk_config
server_initial = cls.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
disk_config=disk_config,
- adminPass=cls.password)
+ adminPass=cls.password,
+ volume_backed=cls.volume_backed)
cls.server = (cls.client.show_server(server_initial['id'])
['server'])
- def _create_net_subnet_ret_net_from_cidr(self, cidr):
- name_net = data_utils.rand_name(self.__class__.__name__)
- net = self.networks_client.create_network(name=name_net)
- self.addCleanup(self.networks_client.delete_network,
- net['network']['id'])
-
- subnet = self.subnets_client.create_subnet(
- network_id=net['network']['id'],
- cidr=cidr,
- ip_version=4)
- self.addCleanup(self.subnets_client.delete_subnet,
- subnet['subnet']['id'])
- return net
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
@@ -87,7 +75,11 @@
self.assertEqual(self.server['accessIPv6'],
str(netaddr.IPAddress(self.accessIPv6)))
self.assertEqual(self.name, self.server['name'])
- self.assertEqual(self.image_ref, self.server['image']['id'])
+ if self.volume_backed:
+ # Image is an empty string as per documentation
+ self.assertEqual("", self.server['image'])
+ else:
+ self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
self.assertEqual(self.meta, self.server['metadata'])
@@ -115,11 +107,13 @@
# Verify that the number of vcpus reported by the instance matches
# the amount stated by the flavor
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
linux_client = remote_client.RemoteClient(
- self.get_server_ip(self.server),
+ self.get_server_ip(self.server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
output = linux_client.exec_command('grep -c ^processor /proc/cpuinfo')
@@ -130,11 +124,13 @@
'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
linux_client = remote_client.RemoteClient(
- self.get_server_ip(self.server),
+ self.get_server_ip(self.server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
hostname = linux_client.exec_command("hostname").rstrip()
@@ -142,89 +138,6 @@
'hostname "%s" but got "%s".' % (self.name, hostname))
self.assertEqual(self.name.lower(), hostname, msg)
- @decorators.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
- @testtools.skipUnless(
- test.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
- 'ServerGroupAffinityFilter is not available.')
- def test_create_server_with_scheduler_hint_group(self):
- # Create a server with the scheduler hint "group".
- group_id = self.create_test_server_group()['id']
- hints = {'group': group_id}
- server = self.create_test_server(scheduler_hints=hints,
- wait_until='ACTIVE')
-
- # Check a server is in the group
- server_group = (self.server_groups_client.show_server_group(group_id)
- ['server_group'])
- self.assertIn(server['id'], server_group['members'])
-
- @decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
- @testtools.skipUnless(CONF.service_available.neutron,
- 'Neutron service must be available.')
- def test_verify_multiple_nics_order(self):
- # Verify that the networks order given at the server creation is
- # preserved within the server.
- net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
- net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
-
- networks = [{'uuid': net1['network']['id']},
- {'uuid': net2['network']['id']}]
-
- server_multi_nics = self.create_test_server(
- networks=networks, wait_until='ACTIVE')
-
- # Cleanup server; this is needed in the test case because with the LIFO
- # nature of the cleanups, if we don't delete the server first, the port
- # will still be part of the subnet and we'll get a 409 from Neutron
- # when trying to delete the subnet. The tear down in the base class
- # will try to delete the server and get a 404 but it's ignored so
- # we're OK.
- self.addCleanup(self.delete_server, server_multi_nics['id'])
-
- addresses = (self.client.list_addresses(server_multi_nics['id'])
- ['addresses'])
-
- # We can't predict the ip addresses assigned to the server on networks.
- # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
- # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
- # address is in first network, similarly second address is in second
- # network.
- addr = [addresses[net1['network']['name']][0]['addr'],
- addresses[net2['network']['name']][0]['addr']]
- networks = [netaddr.IPNetwork('19.80.0.0/24'),
- netaddr.IPNetwork('19.86.0.0/24')]
- for address, network in zip(addr, networks):
- self.assertIn(address, network)
-
- @decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
- @testtools.skipUnless(CONF.service_available.neutron,
- 'Neutron service must be available.')
- def test_verify_duplicate_network_nics(self):
- # Verify that server creation does not fail when more than one nic
- # is created on the same network.
- net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
- net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
-
- networks = [{'uuid': net1['network']['id']},
- {'uuid': net2['network']['id']},
- {'uuid': net1['network']['id']}]
-
- server_multi_nics = self.create_test_server(
- networks=networks, wait_until='ACTIVE')
- self.addCleanup(self.delete_server, server_multi_nics['id'])
-
- addresses = (self.client.list_addresses(server_multi_nics['id'])
- ['addresses'])
-
- addr = [addresses[net1['network']['name']][0]['addr'],
- addresses[net2['network']['name']][0]['addr'],
- addresses[net1['network']['name']][1]['addr']]
- networks = [netaddr.IPNetwork('19.80.0.0/24'),
- netaddr.IPNetwork('19.86.0.0/24'),
- netaddr.IPNetwork('19.80.0.0/24')]
- for address, network in zip(addr, networks):
- self.assertIn(address, network)
-
class ServersTestManualDisk(ServersTestJSON):
disk_config = 'MANUAL'
@@ -235,3 +148,15 @@
if not CONF.compute_feature_enabled.disk_config:
msg = "DiskConfig extension not enabled."
raise cls.skipException(msg)
+
+
+class ServersTestBootFromVolume(ServersTestJSON):
+ """Run the `ServersTestJSON` tests with a volume backed VM"""
+ volume_backed = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(ServersTestBootFromVolume, cls).skip_checks()
+ if not utils.get_service_list()['volume']:
+ msg = "Volume service not enabled."
+ raise cls.skipException(msg)
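The new `ServersTestBootFromVolume` class re-runs every `ServersTestJSON` test against a boot-from-volume server purely by flipping the `volume_backed` class attribute consumed by `create_test_server`; the only behavioural fork is in `test_verify_server_details`, since the API reports an empty string for `image` on volume-backed servers. The same subclass-to-vary pattern in miniature (a sketch; `MyServersTest` is an illustrative name only):

from tempest.api.compute import base
from tempest.common import utils


class MyServersTest(base.BaseV2ComputeTest):
    # Base variant boots from an image; resource_setup() passes
    # volume_backed=cls.volume_backed through to create_test_server().
    volume_backed = False


class MyServersTestBootFromVolume(MyServersTest):
    # Same tests, volume-backed server; skipped when no volume service.
    volume_backed = True

    @classmethod
    def skip_checks(cls):
        super(MyServersTestBootFromVolume, cls).skip_checks()
        if not utils.get_service_list()['volume']:
            raise cls.skipException("Volume service not enabled.")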
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
new file mode 100644
index 0000000..3447d85
--- /dev/null
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -0,0 +1,120 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class ServersTestMultiNic(base.BaseV2ComputeTest):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServersTestMultiNic, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServersTestMultiNic, cls).setup_clients()
+ cls.client = cls.servers_client
+ cls.networks_client = cls.os_primary.networks_client
+ cls.subnets_client = cls.os_primary.subnets_client
+
+ def _create_net_subnet_ret_net_from_cidr(self, cidr):
+ name_net = data_utils.rand_name(self.__class__.__name__)
+ net = self.networks_client.create_network(name=name_net)
+ self.addCleanup(self.networks_client.delete_network,
+ net['network']['id'])
+
+ subnet = self.subnets_client.create_subnet(
+ network_id=net['network']['id'],
+ cidr=cidr,
+ ip_version=4)
+ self.addCleanup(self.subnets_client.delete_subnet,
+ subnet['subnet']['id'])
+ return net
+
+ @decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
+ @testtools.skipUnless(CONF.service_available.neutron,
+ 'Neutron service must be available.')
+ def test_verify_multiple_nics_order(self):
+ # Verify that the networks order given at the server creation is
+ # preserved within the server.
+ net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+ net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+
+ networks = [{'uuid': net1['network']['id']},
+ {'uuid': net2['network']['id']}]
+
+ server_multi_nics = self.create_test_server(
+ networks=networks, wait_until='ACTIVE')
+
+ # Cleanup server; this is needed in the test case because with the LIFO
+ # nature of the cleanups, if we don't delete the server first, the port
+ # will still be part of the subnet and we'll get a 409 from Neutron
+ # when trying to delete the subnet. The tear down in the base class
+ # will try to delete the server and get a 404 but it's ignored so
+ # we're OK.
+ self.addCleanup(self.delete_server, server_multi_nics['id'])
+
+ addresses = (self.client.list_addresses(server_multi_nics['id'])
+ ['addresses'])
+
+ # We can't predict the ip addresses assigned to the server on networks.
+ # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
+ # other times ['19.80.0.3', '19.86.0.3']. So we check that the first
+ # address is in the first network and, similarly, that the second
+ # address is in the second network.
+ addr = [addresses[net1['network']['name']][0]['addr'],
+ addresses[net2['network']['name']][0]['addr']]
+ networks = [netaddr.IPNetwork('19.80.0.0/24'),
+ netaddr.IPNetwork('19.86.0.0/24')]
+ for address, network in zip(addr, networks):
+ self.assertIn(address, network)
+
+ @decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
+ @testtools.skipUnless(CONF.service_available.neutron,
+ 'Neutron service must be available.')
+ def test_verify_duplicate_network_nics(self):
+ # Verify that server creation does not fail when more than one nic
+ # is created on the same network.
+ net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+ net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+
+ networks = [{'uuid': net1['network']['id']},
+ {'uuid': net2['network']['id']},
+ {'uuid': net1['network']['id']}]
+
+ server_multi_nics = self.create_test_server(
+ networks=networks, wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server_multi_nics['id'])
+
+ addresses = (self.client.list_addresses(server_multi_nics['id'])
+ ['addresses'])
+
+ addr = [addresses[net1['network']['name']][0]['addr'],
+ addresses[net2['network']['name']][0]['addr'],
+ addresses[net1['network']['name']][1]['addr']]
+ networks = [netaddr.IPNetwork('19.80.0.0/24'),
+ netaddr.IPNetwork('19.86.0.0/24'),
+ netaddr.IPNetwork('19.80.0.0/24')]
+ for address, network in zip(addr, networks):
+ self.assertIn(address, network)
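The membership assertions at the end of both tests rely on `netaddr.IPNetwork` containment accepting a plain address string as the left operand of `in`, which is what lets the checks tolerate whichever concrete addresses DHCP hands out. For instance, assuming stock netaddr behaviour:

import netaddr

# Either .2 or .3 may be assigned; only the subnet matters to the test.
assert '19.80.0.3' in netaddr.IPNetwork('19.80.0.0/24')
assert '19.86.0.2' not in netaddr.IPNetwork('19.80.0.0/24')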
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 2b03b2b..0093752 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -17,10 +17,10 @@
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -104,7 +104,7 @@
waiters.wait_for_server_termination(self.client, server['id'])
@decorators.idempotent_id('d0f3f0d6-d9b6-4a32-8da4-23015dcab23c')
- @test.services('volume')
+ @utils.services('volume')
def test_delete_server_while_in_attached_volume(self):
# Delete a server while a volume is attached to it
device = '/dev/%s' % CONF.compute.volume_device_name
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 7ee1b02..a126fd6 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -17,13 +17,13 @@
from oslo_log import log as logging
from tempest.api.compute import base
+from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
-from tempest import test
CONF = config.CONF
@@ -66,11 +66,6 @@
dhcp=True)
super(DeviceTaggingTest, cls).setup_credentials()
- @classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
- super(DeviceTaggingTest, cls).resource_setup()
-
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
for d in md_dict['devices']:
@@ -94,7 +89,7 @@
'other']))
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
- @test.services('network', 'volume', 'image')
+ @utils.services('network', 'volume', 'image')
def test_device_tagging(self):
# Create volumes
# The create_volume methods waits for the volumes to be available and
@@ -139,9 +134,12 @@
# Create server
admin_pass = data_utils.rand_password()
config_drive_enabled = CONF.compute_feature_enabled.config_drive
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
config_drive=config_drive_enabled,
adminPass=admin_pass,
name=data_utils.rand_name('device-tagging-server'),
@@ -208,10 +206,10 @@
self.addCleanup(self.delete_server, server['id'])
self.ssh_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
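Throughout this patch the removed `cls.set_validation_resources()` calls give way to explicit accessors: `get_class_validation_resources()` for resources shared by all tests in a class and `get_test_validation_resources()` for per-test ones, both passed explicitly to `create_test_server` and the SSH client. A trimmed sketch of the per-test flow used here, assuming the compute base class plumbing:

from tempest.common.utils.linux import remote_client
from tempest import config

CONF = config.CONF

# Inside a test method of a BaseV2ComputeTest subclass (sketch):
validation_resources = self.get_test_validation_resources(self.os_primary)
server = self.create_test_server(
    validatable=True,
    validation_resources=validation_resources,
    wait_until='ACTIVE')
linux_client = remote_client.RemoteClient(
    self.get_server_ip(server, validation_resources),
    CONF.validation.image_ssh_user,
    password=None,
    pkey=validation_resources['keypair']['private_key'],
    server=server,
    servers_client=self.servers_client)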
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 921b7da..14aecfd 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -15,9 +15,9 @@
import testtools
from tempest.api.compute import base
-from tempest.common import fixed_network
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import fixed_network
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -262,7 +262,7 @@
# so as to ensure only one server is returned.
ip_list = {}
self.s1 = self.client.show_server(self.s1['id'])['server']
- # Get first ip address inspite of v4 or v6
+ # Get the first ip address regardless of whether it is v4 or v6
ip_addr = self.s1['addresses'][self.fixed_network_name][0]['addr']
ip_list[ip_addr] = self.s1['id']
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 90b0665..d9581e3 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -166,7 +166,7 @@
self._validate_novnc_html(body['url'])
# Do the WebSockify HTTP Request to novncproxy to do the RFB connection
self._websocket = compute.create_websocket(body['url'])
- # Validate that we succesfully connected and upgraded to Web Sockets
+ # Validate that we successfully connected and upgraded to Web Sockets
self._validate_websocket_upgrade()
# Validate the RFB Negotiation to determine if a valid VNC session
self._validate_rfb_negotiation()
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index cd09177..e2be249 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -19,13 +19,14 @@
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -33,8 +34,6 @@
class ServerActionsTestJSON(base.BaseV2ComputeTest):
- run_ssh = CONF.validation.run_validation
-
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
@@ -45,13 +44,18 @@
self.server_id, 'ACTIVE')
except lib_exc.NotFound:
# The server was deleted by previous test, create a new one
+ # Use class level validation resources to avoid them being
+ # deleted once a test is over
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE')
self.__class__.server_id = server['id']
except Exception:
# Rebuild server if something happened to it during a test
- self.__class__.server_id = self.rebuild_server(
+ self.__class__.server_id = self.recreate_server(
self.server_id, validatable=True)
def tearDown(self):
@@ -70,10 +74,8 @@
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
-
super(ServerActionsTestJSON, cls).resource_setup()
- cls.server_id = cls.rebuild_server(None, validatable=True)
+ cls.server_id = cls.recreate_server(None, validatable=True)
@decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
@@ -81,8 +83,11 @@
def test_change_server_password(self):
# Since this test messes with the password and makes the
# server unreachable, it should create its own server
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
newserver = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE')
# The server's password should be set to the provided password
new_password = 'Newpass1234'
@@ -93,7 +98,7 @@
# Verify that the user can authenticate with the new password
server = self.client.show_server(newserver['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
new_password,
server=server,
@@ -102,13 +107,15 @@
def _test_reboot_server(self, reboot_type):
if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
# Get the time the server was last rebooted,
server = self.client.show_server(self.server_id)['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
boot_time = linux_client.get_boot_time()
@@ -123,10 +130,10 @@
if CONF.validation.run_validation:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
new_boot_time = linux_client.get_boot_time()
@@ -166,8 +173,7 @@
.format(image_ref, rebuilt_server['image']['id']))
self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
- @decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
- def test_rebuild_server(self):
+ def _test_rebuild_server(self):
# Get the IPs the server has before rebuilding it
original_addresses = (self.client.show_server(self.server_id)['server']
['addresses'])
@@ -203,6 +209,8 @@
self.assertEqual(original_addresses, server['addresses'])
if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
# Authentication is attempted in the following order of priority:
# 1.The key passed in, if one was passed in.
# 2.Any key we can find through an SSH agent (if allowed).
@@ -210,14 +218,18 @@
# ~/.ssh/ (if allowed).
# 4.Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
- self.get_server_ip(rebuilt_server),
+ self.get_server_ip(rebuilt_server, validation_resources),
self.ssh_user,
password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=rebuilt_server,
servers_client=self.client)
linux_client.validate_authentication()
+ @decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
+ def test_rebuild_server(self):
+ self._test_rebuild_server()
+
@decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
@@ -251,7 +263,7 @@
self.client.start_server(self.server_id)
@decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
- @test.services('volume')
+ @utils.services('volume')
def test_rebuild_server_with_volume_attached(self):
# create a new volume and attach it to the server
volume = self.create_volume()
@@ -260,7 +272,7 @@
self.attach_volume(server, volume)
# run general rebuild test
- self.test_rebuild_server()
+ self._test_rebuild_server()
# make sure the volume is attached to the instance after rebuild
vol_after_rebuild = self.volumes_client.show_volume(volume['id'])
@@ -269,45 +281,73 @@
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
- def _test_resize_server_confirm(self, stop=False):
+ def _test_resize_server_confirm(self, server_id, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
if stop:
- self.client.stop_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
+ self.client.stop_server(server_id)
+ waiters.wait_for_server_status(self.client, server_id,
'SHUTOFF')
- self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ self.client.resize_server(server_id, self.flavor_ref_alt)
# NOTE(jlk): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
- self.addCleanup(self.delete_server, self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
+ self.addCleanup(self.delete_server, server_id)
+ waiters.wait_for_server_status(self.client, server_id,
'VERIFY_RESIZE')
- self.client.confirm_resize_server(self.server_id)
+ self.client.confirm_resize_server(server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
- waiters.wait_for_server_status(self.client, self.server_id,
+ waiters.wait_for_server_status(self.client, server_id,
expected_status)
- server = self.client.show_server(self.server_id)['server']
+ server = self.client.show_server(server_id)['server']
self.assertEqual(self.flavor_ref_alt, server['flavor']['id'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
- self.client.start_server(self.server_id)
+ self.client.start_server(server_id)
@decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm(self):
- self._test_resize_server_confirm(stop=False)
+ self._test_resize_server_confirm(self.server_id, stop=False)
+
+ @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
+ @decorators.related_bug('1728603')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @utils.services('volume')
+ def test_resize_volume_backed_server_confirm(self):
+ # We have to create a new server that is volume-backed since the one
+ # from setUp is not volume-backed.
+ server = self.create_test_server(
+ volume_backed=True, wait_until='ACTIVE')
+ self._test_resize_server_confirm(server['id'])
+ if CONF.compute_feature_enabled.console_output:
+ # Now do something interactive with the guest like get its console
+ # output; we don't actually care about the output,
+ # just that it doesn't raise an error.
+ self.client.get_console_output(server['id'])
+ if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.ssh_user,
+ password=None,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.client)
+ linux_client.validate_authentication()
@decorators.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm_from_stopped(self):
- self._test_resize_server_confirm(stop=True)
+ self._test_resize_server_confirm(self.server_id, stop=True)
@decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -332,7 +372,7 @@
@decorators.idempotent_id('b963d4f1-94b3-4c40-9e97-7b583f46e470')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
- @test.services('image')
+ @utils.services('image')
def test_create_backup(self):
# Positive test: create backups successfully and rotate them correctly
# create the first and the second backup
@@ -368,7 +408,11 @@
"been successful as it should have been "
"deleted during rotation.", oldest_backup)
- image1_id = data_utils.parse_image_id(resp['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ image1_id = resp['image_id']
+ else:
+ image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
@@ -379,7 +423,11 @@
backup_type='daily',
rotation=2,
name=backup2).response
- image2_id = data_utils.parse_image_id(resp['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ image2_id = resp['image_id']
+ else:
+ image2_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
@@ -418,7 +466,11 @@
backup_type='daily',
rotation=2,
name=backup3).response
- image3_id = data_utils.parse_image_id(resp['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ image3_id = resp['image_id']
+ else:
+ image3_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(glance_client.delete_image, image3_id)
# the first backup should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@@ -517,7 +569,7 @@
@decorators.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
- @test.services('image')
+ @utils.services('image')
def test_shelve_unshelve_server(self):
if CONF.image_feature_enabled.api_v2:
glance_client = self.os_primary.image_client_v2
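Threading `server_id` through `_test_resize_server_confirm` is what allows the new volume-backed resize test (tagged with `related_bug('1728603')` in the hunk above) to create its own server and still reuse the confirm/wait logic, while the pre-existing tests simply pass the class-level server. Sketch of the call shapes after the change:

def _test_resize_server_confirm(self, server_id, stop=False):
    # Resize server_id to flavor_ref_alt (optionally from SHUTOFF),
    # confirm, and wait for the expected final status; see the hunk above
    # for the full body.
    ...

# Existing tests keep using the class-level server:
#     self._test_resize_server_confirm(self.server_id, stop=False)
# The volume-backed test boots its own server first:
#     server = self.create_test_server(volume_backed=True,
#                                      wait_until='ACTIVE')
#     self._test_resize_server_confirm(server['id'])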
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index 022ceba..f79b05f 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -14,8 +14,8 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class ServerAddressesTestJSON(base.BaseV2ComputeTest):
@@ -39,7 +39,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('6eb718c0-02d9-4d5e-acd1-4e0c269cef39')
- @test.services('network')
+ @utils.services('network')
def test_list_server_addresses(self):
# All public and private addresses for
# a server should be returned
@@ -51,13 +51,10 @@
self.assertNotEmpty(addresses)
for network_addresses in addresses.values():
self.assertNotEmpty(network_addresses)
- for address in network_addresses:
- self.assertTrue(address['addr'])
- self.assertTrue(address['version'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('87bbc374-5538-4f64-b673-2b0e4443cc30')
- @test.services('network')
+ @utils.services('network')
def test_list_server_addresses_by_network(self):
# Providing a network type should filter
# the addresses return by that type
diff --git a/tempest/api/compute/servers/test_server_addresses_negative.py b/tempest/api/compute/servers/test_server_addresses_negative.py
index 76a102b..b2b3cc0 100644
--- a/tempest/api/compute/servers/test_server_addresses_negative.py
+++ b/tempest/api/compute/servers/test_server_addresses_negative.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class ServerAddressesNegativeTestJSON(base.BaseV2ComputeTest):
@@ -38,7 +38,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('02c3f645-2d2e-4417-8525-68c0407d001b')
- @test.services('network')
+ @utils.services('network')
def test_list_server_addresses_invalid_server_id(self):
# List addresses request should fail if server id not in system
self.assertRaises(lib_exc.NotFound, self.client.list_addresses,
@@ -46,7 +46,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a2ab5144-78c0-4942-a0ed-cc8edccfd9ba')
- @test.services('network')
+ @utils.services('network')
def test_list_server_addresses_by_network_neg(self):
# List addresses by network should fail if network name not valid
self.assertRaises(lib_exc.NotFound,
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 69d7897..5286c8f 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -13,10 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
+from tempest.common import compute
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class ServerGroupTestJSON(base.BaseV2ComputeTest):
@@ -30,7 +33,7 @@
@classmethod
def skip_checks(cls):
super(ServerGroupTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('os-server-groups', 'compute'):
+ if not utils.is_extension_enabled('os-server-groups', 'compute'):
msg = "os-server-groups extension is not enabled."
raise cls.skipException(msg)
@@ -106,3 +109,19 @@
# List the server-group
body = self.client.list_server_groups()['server_groups']
self.assertIn(self.created_server_group, body)
+
+ @decorators.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
+ @testtools.skipUnless(
+ compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
+ 'ServerGroupAffinityFilter is not available.')
+ def test_create_server_with_scheduler_hint_group(self):
+ # Create a server with the scheduler hint "group".
+ hints = {'group': self.created_server_group['id']}
+ server = self.create_test_server(scheduler_hints=hints,
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server['id'])
+
+ # Check a server is in the group
+ server_group = (self.server_groups_client.show_server_group(
+ self.created_server_group['id'])['server_group'])
+ self.assertIn(server['id'], server_group['members'])
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index f77e7d3..fe95018 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -32,7 +32,7 @@
def setUp(self):
super(ServerMetadataTestJSON, self).setUp()
meta = {'key1': 'value1', 'key2': 'value2'}
- self.client.set_server_metadata(self.server['id'], meta)['metadata']
+ self.client.set_server_metadata(self.server['id'], meta)
@decorators.idempotent_id('479da087-92b3-4dcf-aeb3-fd293b2d14ce')
def test_list_server_metadata(self):
@@ -49,8 +49,7 @@
# The server's metadata should be replaced with the provided values
# Create a new set of metadata for the server
req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
- self.client.set_server_metadata(self.server['id'],
- req_metadata)['metadata']
+ self.client.set_server_metadata(self.server['id'], req_metadata)
# Verify the expected values are correct, and that the
# previous values have been removed
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 90b9da4..6f32b46 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -34,11 +35,6 @@
super(ServerPersonalityTestJSON, cls).setup_credentials()
@classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
- super(ServerPersonalityTestJSON, cls).resource_setup()
-
- @classmethod
def skip_checks(cls):
super(ServerPersonalityTestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.personality:
@@ -48,7 +44,6 @@
def setup_clients(cls):
super(ServerPersonalityTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- cls.user_client = cls.limits_client
@decorators.idempotent_id('3cfe87fd-115b-4a02-b942-7dc36a337fdf')
def test_create_server_with_personality(self):
@@ -57,16 +52,23 @@
personality = [{'path': file_path,
'contents': base64.encode_as_text(file_contents)}]
password = data_utils.rand_password()
- created_server = self.create_test_server(personality=personality,
- adminPass=password,
- wait_until='ACTIVE',
- validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ created_server = self.create_test_server(
+ personality=personality, adminPass=password, wait_until='ACTIVE',
+ validatable=True,
+ validation_resources=validation_resources)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, created_server['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ created_server['id'])
server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user, password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
self.assertEqual(file_contents,
@@ -75,8 +77,16 @@
@decorators.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
def test_rebuild_server_with_personality(self):
- server = self.create_test_server(wait_until='ACTIVE', validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ wait_until='ACTIVE', validatable=True,
+ validation_resources=validation_resources)
server_id = server['id']
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server_id)
file_contents = 'Test server rebuild.'
personality = [{'path': 'rebuild.txt',
'contents': base64.encode_as_text(file_contents)}]
@@ -93,7 +103,7 @@
# number of files are injected into the server.
file_contents = 'This is a test file.'
personality = []
- limits = self.user_client.show_limits()['limits']
+ limits = self.limits_client.show_limits()['limits']
max_file_limit = limits['absolute']['maxPersonality']
if max_file_limit == -1:
raise self.skipException("No limit for personality files")
@@ -112,7 +122,7 @@
# Server should be created successfully if maximum allowed number of
# files is injected into the server during creation.
file_contents = 'This is a test file.'
- limits = self.user_client.show_limits()['limits']
+ limits = self.limits_client.show_limits()['limits']
max_file_limit = limits['absolute']['maxPersonality']
if max_file_limit == -1:
raise self.skipException("No limit for personality files")
@@ -126,16 +136,22 @@
'contents': base64.encode_as_text(file_contents + str(i)),
})
password = data_utils.rand_password()
- created_server = self.create_test_server(personality=person,
- adminPass=password,
- wait_until='ACTIVE',
- validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ created_server = self.create_test_server(
+ personality=person, adminPass=password, wait_until='ACTIVE',
+ validatable=True, validation_resources=validation_resources)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, created_server['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ created_server['id'])
server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user, password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
for i in person:
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 5fac433..1260c6b 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -16,12 +16,12 @@
import testtools
from tempest.api.compute import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -109,7 +109,7 @@
self.image_ref_alt)
@decorators.idempotent_id('d0ccac79-0091-4cf4-a1ce-26162d0cc55f')
- @test.services('volume')
+ @utils.services('volume')
@decorators.attr(type=['negative'])
def test_rescued_vm_attach_volume(self):
volume = self.create_volume()
@@ -129,7 +129,7 @@
device='/dev/%s' % self.device)
@decorators.idempotent_id('f56e465b-fe10-48bf-b75d-646cda3a8bc9')
- @test.services('volume')
+ @utils.services('volume')
@decorators.attr(type=['negative'])
def test_rescued_vm_detach_volume(self):
volume = self.create_volume()
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index 0370215..8d0a4e3 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -16,9 +16,9 @@
import six
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class ServerTagsTestJSON(base.BaseV2ComputeTest):
@@ -29,7 +29,7 @@
@classmethod
def skip_checks(cls):
super(ServerTagsTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('os-server-tags', 'compute'):
+ if not utils.is_extension_enabled('os-server-tags', 'compute'):
msg = "os-server-tags extension is not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 7fd1dd1..c9ee671 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -19,6 +19,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -31,10 +32,6 @@
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- def tearDown(self):
- self.clear_servers()
- super(ServersTestJSON, self).tearDown()
-
@decorators.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
@testtools.skipUnless(CONF.compute_feature_enabled.
enable_instance_password,
@@ -43,6 +40,11 @@
# If an admin password is provided on server creation, the server's
# root password should be set to that password.
server = self.create_test_server(adminPass='testpassword')
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server['id'])
# Verify the password is set correctly in the response
self.assertEqual('testpassword', server['adminPass'])
@@ -57,9 +59,19 @@
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id1 = server['id']
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, id1)
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, id1)
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id2 = server['id']
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, id2)
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, id2)
self.assertNotEqual(id1, id2, "Did not create a new server")
server = self.client.show_server(id1)['server']
name1 = server['name']
@@ -76,6 +88,11 @@
self.addCleanup(self.keypairs_client.delete_keypair, key_name)
self.keypairs_client.list_keypairs()
server = self.create_test_server(key_name=key_name)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server['id'])
waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
server = self.client.show_server(server['id'])['server']
self.assertEqual(key_name, server['key_name'])
@@ -98,6 +115,11 @@
def test_update_server_name(self):
# The server name should be changed to the provided value
server = self.create_test_server(wait_until='ACTIVE')
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server['id'])
# Update instance name with non-ASCII characters
prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
self._update_server_name(server['id'], 'ACTIVE', prefix_name)
@@ -115,6 +137,11 @@
def test_update_access_server_address(self):
# The server's access addresses should reflect the provided values
server = self.create_test_server(wait_until='ACTIVE')
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server['id'])
# Update the IPv4 and IPv6 access addresses
self.client.update_server(server['id'],
@@ -131,6 +158,11 @@
def test_create_server_with_ipv6_addr_only(self):
# Create a server without an IPv4 address (only IPv6 address).
server = self.create_test_server(accessIPv6='2001:2001::3')
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server['id'])
waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
server = self.client.show_server(server['id'])['server']
self.assertEqual('2001:2001::3', server['accessIPv6'])
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 764767b..d067bb3 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -19,12 +19,12 @@
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -37,7 +37,7 @@
waiters.wait_for_server_status(self.client, self.server_id,
'ACTIVE')
except Exception:
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
@@ -217,7 +217,7 @@
@decorators.attr(type=['negative'])
@decorators.related_bug('1651064', status_code=500)
- @test.services('volume')
+ @utils.services('volume')
@decorators.idempotent_id('12146ac1-d7df-4928-ad25-b1f99e5286cd')
def test_create_server_invalid_bdm_in_2nd_dict(self):
volume = self.create_volume()
@@ -512,7 +512,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('74085be3-a370-4ca2-bc51-2d0e10e0f573')
- @test.services('volume', 'image')
+ @utils.services('volume', 'image')
def test_create_server_from_non_bootable_volume(self):
# Create a volume
volume = self.create_volume()
@@ -551,7 +551,7 @@
waiters.wait_for_server_status(self.servers_client, self.server_id,
'ACTIVE')
except Exception:
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 6b625d9..90f04ff 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -17,10 +17,10 @@
import testtools
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
-from tempest import test
CONF = config.CONF
@@ -44,7 +44,7 @@
cls.server = cls.create_test_server(wait_until='ACTIVE')
@decorators.idempotent_id('96c4e2ef-5e4d-4d7f-87f5-fed6dca18016')
- @test.services('network')
+ @utils.services('network')
def test_list_virtual_interfaces(self):
# Positive test: Should be able to GET the virtual interfaces list
# for a given server_id
@@ -56,12 +56,11 @@
self.client.list_virtual_interfaces(self.server['id'])
else:
output = self.client.list_virtual_interfaces(self.server['id'])
- self.assertIsNotNone(output)
- virt_ifaces = output
- self.assertNotEmpty(virt_ifaces['virtual_interfaces'],
+ virt_ifaces = output['virtual_interfaces']
+ self.assertNotEmpty(virt_ifaces,
'Expected virtual interfaces, got 0 '
'interfaces.')
- for virt_iface in virt_ifaces['virtual_interfaces']:
+ for virt_iface in virt_ifaces:
mac_address = virt_iface['mac_address']
self.assertTrue(netaddr.valid_mac(mac_address),
"Invalid mac address detected. mac address: %s"
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index 173784a..20923a8 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -14,10 +14,10 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
@@ -35,7 +35,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
- @test.services('network')
+ @utils.services('network')
def test_list_virtual_interfaces_invalid_server_id(self):
# Negative test: Should not be able to GET virtual interfaces
# for an invalid server_id
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 42e13bd..34faf5f 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -16,9 +16,9 @@
from oslo_log import log as logging
from tempest.api.compute import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class ExtensionsTestJSON(base.BaseV2ComputeTest):
+class ExtensionsTest(base.BaseV2ComputeTest):
@decorators.idempotent_id('3bb27738-b759-4e0d-a5fa-37d7a6df07d1')
def test_list_extensions(self):
@@ -48,7 +48,7 @@
raise self.skipException('There are not any extensions configured')
@decorators.idempotent_id('05762f39-bdfa-4cdb-9b46-b78f8e78e2fd')
- @test.requires_ext(extension='os-consoles', service='compute')
+ @utils.requires_ext(extension='os-consoles', service='compute')
def test_get_extension(self):
# get the specified extensions
extension = self.extensions_client.show_extension('os-consoles')
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 9d83ee1..7cf90ae 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -15,8 +15,8 @@
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class QuotasTestJSON(base.BaseV2ComputeTest):
@@ -24,7 +24,7 @@
@classmethod
def skip_checks(cls):
super(QuotasTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('os-quota-sets', 'compute'):
+ if not utils.is_extension_enabled('os-quota-sets', 'compute'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/compute/test_tenant_networks.py b/tempest/api/compute/test_tenant_networks.py
index 18c5d38..b55e2c0 100644
--- a/tempest/api/compute/test_tenant_networks.py
+++ b/tempest/api/compute/test_tenant_networks.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class ComputeTenantNetworksTest(base.BaseV2ComputeTest):
@@ -31,7 +31,7 @@
super(ComputeTenantNetworksTest, cls).setup_credentials()
@decorators.idempotent_id('edfea98e-bbe3-4c7a-9739-87b986baff26')
- @test.services('network')
+ @utils.services('network')
def test_list_show_tenant_networks(self):
# Fetch all networks that are visible to the tenant: this may include
# shared and external networks
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ed5f9a6..9bef80f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest.common import compute
from tempest.common.utils.linux import remote_client
@@ -42,35 +40,37 @@
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
super(AttachVolumeTestJSON, cls).resource_setup()
cls.device = CONF.compute.volume_device_name
def _create_server(self):
# Start a server and wait for it to become ready
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=self.image_ssh_password)
self.addCleanup(self.delete_server, server['id'])
# Record addresses so that we can ssh later
server['addresses'] = self.servers_client.list_addresses(
server['id'])['addresses']
- return server
+ return server, validation_resources
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
- server = self._create_server()
+ server, validation_resources = self._create_server()
# NOTE(andreaf) Create one remote client used throughout the test.
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
# NOTE(andreaf) We need to ensure the ssh key has been
@@ -113,7 +113,7 @@
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
# List volume attachment of the server
- server = self._create_server()
+ server, _ = self._create_server()
volume_1st = self.create_volume()
attachment_1st = self.attach_volume(server, volume_1st,
device=('/dev/%s' % self.device))
@@ -143,6 +143,10 @@
self.assertEqual(server['id'], body['serverId'])
self.assertEqual(attachment['volumeId'], body['volumeId'])
self.assertEqual(attachment['id'], body['id'])
+ self.servers_client.detach_volume(server['id'],
+ attachment['volumeId'])
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'available')
class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
@@ -155,15 +159,21 @@
min_microversion = '2.20'
max_microversion = 'latest'
- def _count_volumes(self, server):
+ @classmethod
+ def skip_checks(cls):
+ super(AttachVolumeShelveTestJSON, cls).skip_checks()
+ if not CONF.compute_feature_enabled.shelve:
+ raise cls.skipException('Shelve is not available.')
+
+ def _count_volumes(self, server, validation_resources):
# Count number of volumes on an instance
volumes = 0
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
@@ -171,7 +181,7 @@
volumes = int(linux_client.exec_command(command).strip())
return volumes
- def _shelve_server(self, server):
+ def _shelve_server(self, server, validation_resources):
# NOTE(andreaf) If we are going to shelve a server, we should
# check first whether the server is ssh-able. Otherwise we
# won't be able to distinguish failures introduced by shelve
@@ -180,10 +190,10 @@
# avoid breaking the VM
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
linux_client.validate_authentication()
@@ -191,31 +201,34 @@
# If validation went ok, or it was skipped, shelve the server
compute.shelve_server(self.servers_client, server['id'])
- def _unshelve_server_and_check_volumes(self, server, number_of_volumes):
+ def _unshelve_server_and_check_volumes(self, server,
+ validation_resources,
+ number_of_volumes):
# Unshelve the instance and check that there are expected volumes
self.servers_client.unshelve_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'],
'ACTIVE')
if CONF.validation.run_validation:
- counted_volumes = self._count_volumes(server)
+ counted_volumes = self._count_volumes(
+ server, validation_resources)
self.assertEqual(number_of_volumes, counted_volumes)
@decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
- @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
- 'Shelve is not available.')
def test_attach_volume_shelved_or_offload_server(self):
# Create server, count number of volumes on it, shelve
# server and attach pre-created volume to shelved server
- server = self._create_server()
+ server, validation_resources = self._create_server()
volume = self.create_volume()
- num_vol = self._count_volumes(server)
- self._shelve_server(server)
+ num_vol = self._count_volumes(server, validation_resources)
+ self._shelve_server(server, validation_resources)
attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ device=('/dev/%s' % self.device),
+ check_reserved=True)
# Unshelve the instance and check that attached volume exists
- self._unshelve_server_and_check_volumes(server, num_vol + 1)
+ self._unshelve_server_and_check_volumes(
+ server, validation_resources, num_vol + 1)
# Get volume attachment of the server
volume_attachment = self.servers_client.show_volume_attachment(
@@ -228,22 +241,22 @@
self.assertIsNotNone(volume_attachment['device'])
@decorators.idempotent_id('b54e86dd-a070-49c4-9c07-59ae6dae15aa')
- @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
- 'Shelve is not available.')
def test_detach_volume_shelved_or_offload_server(self):
# Count number of volumes on instance, shelve
# server and attach pre-created volume to shelved server
- server = self._create_server()
+ server, validation_resources = self._create_server()
volume = self.create_volume()
- num_vol = self._count_volumes(server)
- self._shelve_server(server)
+ num_vol = self._count_volumes(server, validation_resources)
+ self._shelve_server(server, validation_resources)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device),
+ check_reserved=True)
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# Unshelve the instance and check that we have the expected number of
# volume(s)
- self._unshelve_server_and_check_volumes(server, num_vol)
+ self._unshelve_server_and_check_volumes(
+ server, validation_resources, num_vol)
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index 0f436eb..b8ca81d 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
@@ -38,6 +36,9 @@
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
+ if not CONF.volume_feature_enabled.snapshot:
+ skip_msg = ("Cinder volume snapshots are disabled")
+ raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
@@ -46,8 +47,6 @@
cls.snapshots_client = cls.snapshots_extensions_client
@decorators.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
- @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
- 'Cinder volume snapshots are disabled')
def test_volume_snapshot_create_get_list_delete(self):
volume = self.create_volume()
self.addCleanup(self.delete_volume, volume['id'])
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 01cfb5f..d83d49e 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -52,13 +52,9 @@
volume = self.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)
- self.assertIn('id', volume)
- self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
- self.assertIsNotNone(volume['id'],
- "Field volume id is empty or not found.")
# GET Volume
fetched_volume = self.volumes_client.show_volume(
volume['id'])['volume']
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index 59fc4d8..947706e 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -23,15 +23,15 @@
@classmethod
def resource_setup(cls):
super(EndPointsTestJSON, cls).resource_setup()
- cls.service_ids = list()
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
service_data = cls.services_client.create_service(
name=s_name, type=s_type,
description=s_description)['OS-KSADM:service']
+ cls.addClassResourceCleanup(cls.services_client.delete_service,
+ service_data['id'])
cls.service_id = service_data['id']
- cls.service_ids.append(cls.service_id)
# Create endpoints to use for the LIST and GET test cases
cls.setup_endpoints = list()
for _ in range(2):
@@ -43,18 +43,12 @@
publicurl=url,
adminurl=url,
internalurl=url)['endpoint']
+ cls.addClassResourceCleanup(cls.endpoints_client.delete_endpoint,
+ endpoint['id'])
# list_endpoints() will return 'enabled' field
endpoint['enabled'] = True
cls.setup_endpoints.append(endpoint)
- @classmethod
- def resource_cleanup(cls):
- for e in cls.setup_endpoints:
- cls.endpoints_client.delete_endpoint(e['id'])
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
def test_list_endpoints(self):
# Get a list of endpoints
diff --git a/tempest/api/identity/admin/v2/test_roles.py b/tempest/api/identity/admin/v2/test_roles.py
index 124bb5f..9736a76 100644
--- a/tempest/api/identity/admin/v2/test_roles.py
+++ b/tempest/api/identity/admin/v2/test_roles.py
@@ -28,14 +28,11 @@
for _ in range(5):
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.roles_client.delete_role, role['id'])
cls.roles.append(role)
- @classmethod
- def resource_cleanup(cls):
- super(RolesTestJSON, cls).resource_cleanup()
- for role in cls.roles:
- cls.roles_client.delete_role(role['id'])
-
def _get_role_params(self):
user = self.setup_test_user()
tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index 634cf21..e2ed5ef 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -41,7 +41,6 @@
self.assertIsNotNone(service_data['id'])
self.addCleanup(self._del_service, service_data['id'])
# Verifying response body of create service
- self.assertIn('id', service_data)
self.assertIn('name', service_data)
self.assertEqual(name, service_data['name'])
self.assertIn('type', service_data)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 9fe978c..ca6b03e 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -34,19 +34,6 @@
domain = cls.create_domain(enabled=i < 2)
cls.setup_domains.append(domain)
- @classmethod
- def resource_cleanup(cls):
- for domain in cls.setup_domains:
- cls._delete_domain(domain['id'])
- super(DomainsTestJSON, cls).resource_cleanup()
-
- @classmethod
- def _delete_domain(cls, domain_id):
- # It is necessary to disable the domain before deleting,
- # or else it would result in unauthorized error
- cls.domains_client.update_domain(domain_id, enabled=False)
- cls.domains_client.delete_domain(domain_id)
-
@decorators.idempotent_id('8cf516ef-2114-48f1-907b-d32726c734d4')
def test_list_domains(self):
# Test to list domains
@@ -92,8 +79,7 @@
domain = self.domains_client.create_domain(
name=d_name, description=d_desc)['domain']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_domain, domain['id'])
- self.assertIn('id', domain)
+ self.delete_domain, domain['id'])
self.assertIn('description', domain)
self.assertIn('name', domain)
self.assertIn('enabled', domain)
@@ -146,8 +132,7 @@
# Create domain only with name
d_name = data_utils.rand_name('domain')
domain = self.domains_client.create_domain(name=d_name)['domain']
- self.addCleanup(self._delete_domain, domain['id'])
- self.assertIn('id', domain)
+ self.addCleanup(self.delete_domain, domain['id'])
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
self.assertDictContainsSubset(expected_data, domain)
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index 1a0b851..56f7d32 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -20,7 +20,6 @@
class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
- _interface = 'json'
@decorators.attr(type=['negative', 'gate'])
@decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 5cd456c..49dbba1 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -54,17 +54,17 @@
super(EndPointGroupsTest, cls).resource_cleanup()
@classmethod
- def _create_service(self):
+ def _create_service(cls):
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
service_data = (
- self.services_client.create_service(name=s_name,
- type=s_type,
- description=s_description))
+ cls.services_client.create_service(name=s_name,
+ type=s_type,
+ description=s_description))
service_id = service_data['service']['id']
- self.service_ids.append(service_id)
+ cls.service_ids.append(service_id)
return service_id
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index c9faa9a..5d48f68 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -117,7 +117,6 @@
self.setup_endpoint_ids.append(endpoint['id'])
# Asserting Create Endpoint response body
- self.assertIn('id', endpoint)
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 4bc987f..507810b 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -14,9 +14,12 @@
# under the License.
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
@@ -25,13 +28,6 @@
super(GroupsV3TestJSON, cls).resource_setup()
cls.domain = cls.create_domain()
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the domains created in the setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(GroupsV3TestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('2e80343b-6c81-4ac3-88c7-452f3e9d5129')
def test_group_create_update_get(self):
name = data_utils.rand_name('Group')
@@ -130,7 +126,14 @@
self.addCleanup(self.groups_client.delete_group, group['id'])
group_ids.append(group['id'])
# List and Verify Groups
- body = self.groups_client.list_groups()['groups']
+ # When domain specific drivers are enabled, the operations
+ # of listing all users and listing all groups are not supported;
+ # they need a domain filter to be specified
+ if CONF.identity_feature_enabled.domain_specific_drivers:
+ body = self.groups_client.list_groups(
+ domain_id=self.domain['id'])['groups']
+ else:
+ body = self.groups_client.list_groups()['groups']
for g in body:
fetched_ids.append(g['id'])
missing_groups = [g for g in group_ids if g not in fetched_ids]
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 49b6585..c0c79b9 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -11,22 +11,22 @@
# under the License.
from tempest.api.identity import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
-class BaseInheritsV3Test(base.BaseIdentityV3AdminTest):
+class InheritsV3TestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def skip_checks(cls):
- super(BaseInheritsV3Test, cls).skip_checks()
- if not test.is_extension_enabled('OS-INHERIT', 'identity'):
+ super(InheritsV3TestJSON, cls).skip_checks()
+ if not utils.is_extension_enabled('OS-INHERIT', 'identity'):
raise cls.skipException("Inherits aren't enabled")
@classmethod
def resource_setup(cls):
- super(BaseInheritsV3Test, cls).resource_setup()
+ super(InheritsV3TestJSON, cls).resource_setup()
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
@@ -49,17 +49,12 @@
cls.groups_client.delete_group(cls.group['id'])
cls.users_client.delete_user(cls.user['id'])
cls.projects_client.delete_project(cls.project['id'])
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(BaseInheritsV3Test, cls).resource_cleanup()
+ super(InheritsV3TestJSON, cls).resource_cleanup()
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
-
-class InheritsV3TestJSON(BaseInheritsV3Test):
-
@decorators.idempotent_id('4e6f0366-97c8-423c-b2be-41eae6ac91c8')
def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
# Create role
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 7e70c14..82664e8 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -27,35 +27,27 @@
# Create a domain
cls.domain = cls.create_domain()
# Create project with domain
- cls.projects = list()
cls.p1_name = data_utils.rand_name('project')
cls.p1 = cls.projects_client.create_project(
cls.p1_name, enabled=False,
domain_id=cls.domain['id'])['project']
- cls.projects.append(cls.p1)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p1['id'])
cls.project_ids.append(cls.p1['id'])
# Create default project
p2_name = data_utils.rand_name('project')
cls.p2 = cls.projects_client.create_project(p2_name)['project']
- cls.projects.append(cls.p2)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p2['id'])
cls.project_ids.append(cls.p2['id'])
# Create a new project (p3) using p2 as parent project
p3_name = data_utils.rand_name('project')
cls.p3 = cls.projects_client.create_project(
p3_name, parent_id=cls.p2['id'])['project']
- cls.projects.append(cls.p3)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p3['id'])
cls.project_ids.append(cls.p3['id'])
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the projects created during setup in inverse order
- for project in reversed(cls.projects):
- cls.projects_client.delete_project(project['id'])
- # Cleanup the domain created during setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(ListProjectsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
def test_list_projects(self):
# List projects
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 47a3580..88cd8be 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -14,9 +14,12 @@
# under the License.
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
@@ -57,9 +60,6 @@
# Cleanup the users created during setup
for user in cls.users:
cls.users_client.delete_user(user['id'])
- # Cleanup the domain created during setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
super(UsersV3TestJSON, cls).resource_cleanup()
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
@@ -82,6 +82,11 @@
def test_list_users_with_name(self):
# List users with name
params = {'name': self.domain_enabled_user['name']}
+ # When domain specific drivers are enabled, the operations
+ # of listing all users and listing all groups are not supported;
+ # they need a domain filter to be specified
+ if CONF.identity_feature_enabled.domain_specific_drivers:
+ params['domain_id'] = self.domain_enabled_user['domain_id']
self._list_users_with_params(params, 'name',
self.domain_enabled_user,
self.non_domain_enabled_user)
@@ -89,7 +94,18 @@
@decorators.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
def test_list_users(self):
# List users
- body = self.users_client.list_users()['users']
+ # When domain specific drivers are enabled, the operations
+ # of listing all users and listing all groups are not supported;
+ # they need a domain filter to be specified
+ if CONF.identity_feature_enabled.domain_specific_drivers:
+ body_enabled_user = self.users_client.list_users(
+ domain_id=self.domain_enabled_user['domain_id'])['users']
+ body_non_enabled_user = self.users_client.list_users(
+ domain_id=self.non_domain_enabled_user['domain_id'])['users']
+ body = (body_enabled_user + body_non_enabled_user)
+ else:
+ body = self.users_client.list_users()['users']
+
fetched_ids = [u['id'] for u in body]
missing_users = [u['id'] for u in self.users
if u['id'] not in fetched_ids]
diff --git a/tempest/api/identity/admin/v3/test_oauth_consumers.py b/tempest/api/identity/admin/v3/test_oauth_consumers.py
index 970ead3..062cce5 100644
--- a/tempest/api/identity/admin/v3/test_oauth_consumers.py
+++ b/tempest/api/identity/admin/v3/test_oauth_consumers.py
@@ -17,7 +17,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions as exceptions
+from tempest.lib import exceptions
class OAUTHConsumersV3Test(base.BaseIdentityV3AdminTest):
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 960e2cb..2908fc4 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -52,7 +52,6 @@
policy = self.policies_client.create_policy(blob=blob,
type=policy_type)['policy']
self.addCleanup(self._delete_policy, policy['id'])
- self.assertIn('id', policy)
self.assertIn('type', policy)
self.assertIn('blob', policy)
self.assertIsNotNone(policy['id'])
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 1b1d3f7..ac23067 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -87,7 +87,8 @@
# project and domain APIs
projects_list = self.projects_client.list_projects(
params={'is_domain': True})['projects']
- self.assertIn(project, projects_list)
+ project_ids = [p['id'] for p in projects_list]
+ self.assertIn(project['id'], project_ids)
# The domains API return different attributes for the entity, so we
# compare the entities IDs
@@ -205,3 +206,31 @@
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
+
+ @decorators.idempotent_id('d1db68b6-aebe-4fa0-b79d-d724d2e21162')
+ def test_project_get_equals_list(self):
+ fields = ['parent_id', 'is_domain', 'description', 'links',
+ 'name', 'enabled', 'domain_id', 'id', 'tags']
+
+ # Tags must be unique; the keystone API will reject duplicates
+ tags = ['a', 'c', 'b', 'd']
+
+ # Create a Project, cleanup is handled in the helper
+ project = self.setup_test_project(tags=tags)
+
+ # Show and list for the project
+ project_get = self.projects_client.show_project(
+ project['id'])['project']
+ _projects = self.projects_client.list_projects()['projects']
+ project_list = next(x for x in _projects if x['id'] == project['id'])
+
+ # Assert the list of fields is correct (one is enough to check here)
+ self.assertSetEqual(set(fields), set(project_get.keys()))
+
+ # Ensure the set of tags is identical and match the expected one
+ get_tags = set(project_get.pop("tags"))
+ self.assertSetEqual(get_tags, set(project_list.pop("tags")))
+ self.assertSetEqual(get_tags, set(tags))
+
+ # Ensure all other fields are identical
+ self.assertDictEqual(project_get, project_list)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 6d42b2a..e7b005c 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -33,7 +33,6 @@
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
cls.roles.append(role)
- cls.fetched_role_ids = list()
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
@@ -59,18 +58,10 @@
cls.groups_client.delete_group(cls.group_body['id'])
cls.users_client.delete_user(cls.user_body['id'])
cls.projects_client.delete_project(cls.project['id'])
- # NOTE(harika-vakadi): It is necessary to disable the domain
- # before deleting,or else it would result in unauthorized error
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
for role in cls.roles:
cls.roles_client.delete_role(role['id'])
super(RolesV3TestJSON, cls).resource_cleanup()
- def _list_assertions(self, body, fetched_role_ids, role_id):
- self.assertEqual(len(body), 1)
- self.assertIn(role_id, fetched_role_ids)
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
def test_role_create_update_show_list(self):
@@ -104,11 +95,8 @@
roles = self.roles_client.list_user_roles_on_project(
self.project['id'], self.user_body['id'])['roles']
- for i in roles:
- self.fetched_role_ids.append(i['id'])
-
- self._list_assertions(roles, self.fetched_role_ids,
- self.role['id'])
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
self.roles_client.check_user_role_existence_on_project(
self.project['id'], self.user_body['id'], self.role['id'])
@@ -124,11 +112,8 @@
roles = self.roles_client.list_user_roles_on_domain(
self.domain['id'], self.user_body['id'])['roles']
- for i in roles:
- self.fetched_role_ids.append(i['id'])
-
- self._list_assertions(roles, self.fetched_role_ids,
- self.role['id'])
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
self.roles_client.check_user_role_existence_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
@@ -145,11 +130,9 @@
roles = self.roles_client.list_group_roles_on_project(
self.project['id'], self.group_body['id'])['roles']
- for i in roles:
- self.fetched_role_ids.append(i['id'])
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
- self._list_assertions(roles, self.fetched_role_ids,
- self.role['id'])
# Add user to group, and ensure user has role on project
self.groups_client.add_group_user(self.group_body['id'],
self.user_body['id'])
@@ -179,11 +162,8 @@
roles = self.roles_client.list_group_roles_on_domain(
self.domain['id'], self.group_body['id'])['roles']
- for i in roles:
- self.fetched_role_ids.append(i['id'])
-
- self._list_assertions(roles, self.fetched_role_ids,
- self.role['id'])
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
self.roles_client.check_role_from_group_on_domain_existence(
self.domain['id'], self.group_body['id'], self.role['id'])
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index 20c8a44..5afeb98 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -69,7 +69,6 @@
service = self.services_client.create_service(
type=serv_type, name=name)['service']
self.addCleanup(self.services_client.delete_service, service['id'])
- self.assertIn('id', service)
expected_data = {'name': name, 'type': serv_type}
self.assertDictContainsSubset(expected_data, service)
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 1acc67d..0845407 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -26,6 +26,8 @@
class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
+ credentials = ['primary', 'admin', 'alt']
+
@decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
def test_tokens(self):
# Valid user's token is authenticated
@@ -39,6 +41,7 @@
resp = self.token.auth(user_id=user['id'],
password=u_password).response
subject_token = resp['x-subject-token']
+ self.client.check_token_existence(subject_token)
# Perform GET Token
token_details = self.client.show_token(subject_token)['token']
self.assertEqual(resp['x-subject-token'], subject_token)
@@ -46,7 +49,7 @@
self.assertEqual(token_details['user']['name'], u_name)
# Perform Delete Token
self.client.delete_token(subject_token)
- self.assertRaises(lib_exc.NotFound, self.client.show_token,
+ self.assertRaises(lib_exc.NotFound, self.client.check_token_existence,
subject_token)
@decorators.idempotent_id('565fa210-1da1-4563-999b-f7b5b67cf112')
@@ -160,16 +163,80 @@
manager_project_id]
# Get available project scopes
- available_projects =\
- self.client.list_auth_projects()['projects']
+ available_projects = self.client.list_auth_projects()['projects']
- # create list to save fetched project's id
+ # Create list to save fetched project IDs
fetched_project_ids = [i['id'] for i in available_projects]
# verifying the project ids in list
missing_project_ids = \
- [p for p in assigned_project_ids
- if p not in fetched_project_ids]
+ [p for p in assigned_project_ids if p not in fetched_project_ids]
self.assertEmpty(missing_project_ids,
- "Failed to find project_id %s in fetched list" %
+ "Failed to find project_ids %s in fetched list" %
', '.join(missing_project_ids))
+
+ @decorators.idempotent_id('ec5ecb05-af64-4c04-ac86-4d9f6f12f185')
+ def test_get_available_domain_scopes(self):
+ # Test for verifying that listing domain scopes for a user works if
+ # the user has a domain role or belongs to a group that has a domain
+ # role. For this test, admin client is used to add roles to alt user,
+ # which performs API calls, to avoid 401 Unauthorized errors.
+ alt_user_id = self.os_alt.credentials.user_id
+
+ def _create_user_domain_role_for_alt_user():
+ domain_id = self.setup_test_domain()['id']
+ role_id = self.setup_test_role()['id']
+
+ # Create a role association between the user and domain.
+ self.roles_client.create_user_role_on_domain(
+ domain_id, alt_user_id, role_id)
+ self.addCleanup(
+ self.roles_client.delete_role_from_user_on_domain,
+ domain_id, alt_user_id, role_id)
+
+ return domain_id
+
+ def _create_group_domain_role_for_alt_user():
+ domain_id = self.setup_test_domain()['id']
+ role_id = self.setup_test_role()['id']
+
+ # Create a group.
+ group_name = data_utils.rand_name('Group')
+ group_id = self.groups_client.create_group(
+ name=group_name, domain_id=domain_id)['group']['id']
+ self.addCleanup(self.groups_client.delete_group, group_id)
+
+ # Add the alt user to the group.
+ self.groups_client.add_group_user(group_id, alt_user_id)
+ self.addCleanup(self.groups_client.delete_group_user,
+ group_id, alt_user_id)
+
+ # Create a role association between the group and domain.
+ self.roles_client.create_group_role_on_domain(
+ domain_id, group_id, role_id)
+ self.addCleanup(
+ self.roles_client.delete_role_from_group_on_domain,
+ domain_id, group_id, role_id)
+
+ return domain_id
+
+ # Add the alt user to 2 random domains and 2 random groups
+ # with randomized domains and roles.
+ assigned_domain_ids = []
+ for _ in range(2):
+ domain_id = _create_user_domain_role_for_alt_user()
+ assigned_domain_ids.append(domain_id)
+ domain_id = _create_group_domain_role_for_alt_user()
+ assigned_domain_ids.append(domain_id)
+
+ # Get available domain scopes for the alt user.
+ available_domains = self.os_alt.identity_v3_client.list_auth_domains()[
+ 'domains']
+ fetched_domain_ids = [i['id'] for i in available_domains]
+
+ # Verify the expected domain IDs are in the list.
+ missing_domain_ids = \
+ [p for p in assigned_domain_ids if p not in fetched_domain_ids]
+ self.assertEmpty(missing_domain_ids,
+ "Failed to find domain_ids %s in fetched list"
+ % ", ".join(missing_domain_ids))
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 3e6a2de..2530072 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -26,22 +26,27 @@
CONF = config.CONF
-class BaseTrustsV3Test(base.BaseIdentityV3AdminTest):
+class TrustsV3TestJSON(base.BaseIdentityV3AdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(TrustsV3TestJSON, cls).skip_checks()
+ if not CONF.identity_feature_enabled.trust:
+ raise cls.skipException("Trusts aren't enabled")
def setUp(self):
- super(BaseTrustsV3Test, self).setUp()
+ super(TrustsV3TestJSON, self).setUp()
# Use alt_username as the trustee
- if not CONF.identity_feature_enabled.trust:
- raise self.skipException("Trusts aren't enabled")
-
self.trust_id = None
+ self.create_trustor_and_roles()
+ self.addCleanup(self.cleanup_user_and_roles)
def tearDown(self):
if self.trust_id:
# Do the delete in tearDown not addCleanup - we want the test to
# fail in the event there is a bug which causes undeletable trusts
self.delete_trust()
- super(BaseTrustsV3Test, self).tearDown()
+ super(TrustsV3TestJSON, self).tearDown()
def create_trustor_and_roles(self):
# create a project that trusts will be granted on
@@ -190,14 +195,6 @@
self.trust_id)
self.trust_id = None
-
-class TrustsV3TestJSON(BaseTrustsV3Test):
-
- def setUp(self):
- super(TrustsV3TestJSON, self).setUp()
- self.create_trustor_and_roles()
- self.addCleanup(self.cleanup_user_and_roles)
-
@decorators.idempotent_id('5a0a91a4-baef-4a14-baba-59bf4d7fcace')
def test_trust_impersonate(self):
# Test case to check we can create, get and delete a trust
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 409d4f8..3813568 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -41,31 +41,26 @@
email=u_email, enabled=False)['user']
# Delete the User at the end of this method
self.addCleanup(self.users_client.delete_user, user['id'])
+
# Creating second project for the update
project = self.setup_test_project()
+
# Updating user details with new values
- u_name2 = data_utils.rand_name('user2')
- u_email2 = u_name2 + '@testmail.tm'
- u_description2 = u_name2 + ' description'
- update_user = self.users_client.update_user(
- user['id'], name=u_name2, description=u_description2,
- project_id=project['id'],
- email=u_email2, enabled=False)['user']
- self.assertEqual(u_name2, update_user['name'])
- self.assertEqual(u_description2, update_user['description'])
- self.assertEqual(project['id'],
- update_user['project_id'])
- self.assertEqual(u_email2, update_user['email'])
- self.assertEqual(False, update_user['enabled'])
- # GET by id after updation
+ update_kwargs = {'name': data_utils.rand_name('user2'),
+ 'description': data_utils.rand_name('desc2'),
+ 'project_id': project['id'],
+ 'email': 'user2@testmail.tm',
+ 'enabled': False}
+ updated_user = self.users_client.update_user(
+ user['id'], **update_kwargs)['user']
+ for field in update_kwargs:
+ self.assertEqual(update_kwargs[field], updated_user[field])
+
+ # GET by id after updating
new_user_get = self.users_client.show_user(user['id'])['user']
# Assert response body of GET after the update
- self.assertEqual(u_name2, new_user_get['name'])
- self.assertEqual(u_description2, new_user_get['description'])
- self.assertEqual(project['id'],
- new_user_get['project_id'])
- self.assertEqual(u_email2, new_user_get['email'])
- self.assertEqual(False, new_user_get['enabled'])
+ for field in update_kwargs:
+ self.assertEqual(update_kwargs[field], new_user_get[field])
@decorators.idempotent_id('2d223a0e-e457-4a70-9fb1-febe027a0ff9')
def test_update_user_password(self):
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 4495cbf..9edccbb 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -187,6 +187,7 @@
cls.non_admin_users_client = cls.os_primary.users_v3_client
cls.non_admin_token = cls.os_primary.token_v3_client
cls.non_admin_projects_client = cls.os_primary.projects_client
+ cls.non_admin_catalog_client = cls.os_primary.catalog_client
cls.non_admin_versions_client =\
cls.os_primary.identity_versions_v3_client
@@ -248,13 +249,16 @@
if 'description' not in kwargs:
kwargs['description'] = data_utils.rand_name('desc')
domain = cls.domains_client.create_domain(**kwargs)['domain']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_domain, domain['id'])
return domain
- def delete_domain(self, domain_id):
+ @classmethod
+ def delete_domain(cls, domain_id):
# NOTE(mpavlase) It is necessary to disable the domain before deleting
# otherwise it raises Forbidden exception
- self.domains_client.update_domain(domain_id, enabled=False)
- self.domains_client.delete_domain(domain_id)
+ cls.domains_client.update_domain(domain_id, enabled=False)
+ cls.domains_client.delete_domain(domain_id)
def setup_test_user(self, password=None):
"""Set up a test user."""
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 599b784..237e728 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.identity import base
+from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class EC2CredentialsTest(base.BaseIdentityV2Test):
@@ -24,7 +24,7 @@
@classmethod
def skip_checks(cls):
super(EC2CredentialsTest, cls).skip_checks()
- if not test.is_extension_enabled('OS-EC2', 'identity'):
+ if not utils.is_extension_enabled('OS-EC2', 'identity'):
msg = "OS-EC2 identity extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
new file mode 100644
index 0000000..deec2dc
--- /dev/null
+++ b/tempest/api/identity/v3/test_catalog.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib import decorators
+
+
+CONF = config.CONF
+
+
+class IdentityCatalogTest(base.BaseIdentityV3Test):
+
+ @decorators.idempotent_id('56b57ced-22b8-4127-9b8a-565dfb0207e2')
+ def test_catalog_standardization(self):
+ # http://git.openstack.org/cgit/openstack/service-types-authority
+ # /tree/service-types.yaml
+ standard_service_values = [{'name': 'keystone', 'type': 'identity'},
+ {'name': 'nova', 'type': 'compute'},
+ {'name': 'glance', 'type': 'image'},
+ {'name': 'swift', 'type': 'object-store'}]
+ # next, we need to GET the catalog using the catalog client
+ catalog = self.non_admin_catalog_client.show_catalog()['catalog']
+ # get list of the service types present in the catalog
+ catalog_services = []
+ for service in catalog:
+ catalog_services.append(service['type'])
+ for service in standard_service_values:
+ # if the service is enabled, check that it uses the standard type value
+ if service['name'] == 'keystone' or\
+ getattr(CONF.service_available, service['name']):
+ self.assertIn(service['type'], catalog_services)
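For reference, show_catalog() is expected to return one entry per deployed service, shaped roughly as follows (illustrative values; only the 'type' field of each entry is consulted by the assertion above):

    {'catalog': [
        {'type': 'identity', 'name': 'keystone', 'endpoints': [...]},
        {'type': 'compute', 'name': 'nova', 'endpoints': [...]},
        {'type': 'image', 'name': 'glance', 'endpoints': [...]},
    ]}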
diff --git a/tempest/api/identity/v3/test_projects.py b/tempest/api/identity/v3/test_projects.py
index 0ae35ea..bbb4013 100644
--- a/tempest/api/identity/v3/test_projects.py
+++ b/tempest/api/identity/v3/test_projects.py
@@ -24,8 +24,7 @@
@decorators.idempotent_id('86128d46-e170-4644-866a-cc487f699e1d')
def test_list_projects_returns_only_authorized_projects(self):
- alt_project_name =\
- self.os_alt.credentials.project_name
+ alt_project_name = self.os_alt.credentials.project_name
resp = self.non_admin_users_client.list_user_projects(
self.os_primary.credentials.user_id)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 70ba2fe..7103d56 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -46,16 +46,6 @@
cls.created_images = []
@classmethod
- def resource_cleanup(cls):
- for image_id in cls.created_images:
- test_utils.call_and_ignore_notfound_exc(
- cls.client.delete_image, image_id)
-
- for image_id in cls.created_images:
- cls.client.wait_for_resource_deletion(image_id)
- super(BaseImageTest, cls).resource_cleanup()
-
- @classmethod
def create_image(cls, data=None, **kwargs):
"""Wrapper that returns a test image."""
@@ -75,6 +65,10 @@
if 'image' in image:
image = image['image']
cls.created_images.append(image['id'])
+ cls.addClassResourceCleanup(cls.client.wait_for_resource_deletion,
+ image['id'])
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.client.delete_image, image['id'])
return image
@classmethod
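Since class-level cleanups execute in reverse registration order, the two calls above give every image created through create_image() a delete-then-wait teardown, roughly equivalent to the removed resource_cleanup logic applied per image:

    # at class teardown, for each registered image id:
    test_utils.call_and_ignore_notfound_exc(cls.client.delete_image, image_id)
    cls.client.wait_for_resource_deletion(image_id)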
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 2e68efd..c846f88 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -119,7 +119,7 @@
# Update Image
new_image_name = data_utils.rand_name('new-image')
- body = self.client.update_image(image['id'], [
+ self.client.update_image(image['id'], [
dict(replace='/name', value=new_image_name)])
# Verifying updating
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 7304db9..5068fc4 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -14,8 +14,8 @@
from tempest.api.network import base
from tempest.common import tempest_fixtures as fixtures
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class AgentManagementTestJSON(base.BaseAdminNetworkTest):
@@ -23,7 +23,7 @@
@classmethod
def skip_checks(cls):
super(AgentManagementTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('agent', 'network'):
+ if not utils.is_extension_enabled('agent', 'network'):
msg = "agent extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 485c8f5..8315c5d 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
@@ -22,7 +22,7 @@
@classmethod
def skip_checks(cls):
super(DHCPAgentSchedulersTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
+ if not utils.is_extension_enabled('dhcp_agent_scheduler', 'network'):
msg = "dhcp_agent_scheduler extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index 7ee819e..5aa337c 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -28,7 +28,7 @@
@classmethod
def skip_checks(cls):
super(FloatingIPAdminTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
if not CONF.network.public_network_id:
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index 85b2472..1a7b0ec 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -13,10 +13,10 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
-from tempest import test
CONF = config.CONF
AGENT_TYPE = 'L3 agent'
@@ -41,7 +41,7 @@
@classmethod
def skip_checks(cls):
super(L3AgentSchedulerTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
+ if not utils.is_extension_enabled('l3_agent_scheduler', 'network'):
msg = "L3 Agent Scheduler Extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/network/admin/test_metering_extensions.py b/tempest/api/network/admin/test_metering_extensions.py
index 21a7ab4..5063fef 100644
--- a/tempest/api/network/admin/test_metering_extensions.py
+++ b/tempest/api/network/admin/test_metering_extensions.py
@@ -13,9 +13,10 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest import test
class MeteringTestJSON(base.BaseAdminNetworkTest):
@@ -28,7 +29,7 @@
@classmethod
def skip_checks(cls):
super(MeteringTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('metering', 'network'):
+ if not utils.is_extension_enabled('metering', 'network'):
msg = "metering extension not enabled."
raise cls.skipException(msg)
@@ -52,7 +53,10 @@
description=description,
name=name)
metering_label = body['metering_label']
- cls.metering_labels.append(metering_label)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.admin_metering_labels_client.delete_metering_label,
+ metering_label['id'])
return metering_label
@classmethod
@@ -64,7 +68,9 @@
remote_ip_prefix=remote_ip_prefix, direction=direction,
metering_label_id=metering_label_id)
metering_label_rule = body['metering_label_rule']
- cls.metering_label_rules.append(metering_label_rule)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ client.delete_metering_label_rule, metering_label_rule['id'])
return metering_label_rule
def _delete_metering_label(self, metering_label_id):
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 21688d2..6849653 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class QuotasNegativeTest(base.BaseAdminNetworkTest):
@@ -35,7 +35,7 @@
@classmethod
def skip_checks(cls):
super(QuotasNegativeTest, cls).skip_checks()
- if not test.is_extension_enabled('quotas', 'network'):
+ if not utils.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index aa8b2dc..cf4236d 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -14,10 +14,11 @@
# under the License.
from tempest.api.network import base
+from tempest.common import identity
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest import test
class QuotasTest(base.BaseAdminNetworkTest):
@@ -38,7 +39,7 @@
@classmethod
def skip_checks(cls):
super(QuotasTest, cls).skip_checks()
- if not test.is_extension_enabled('quotas', 'network'):
+ if not utils.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
@@ -46,10 +47,11 @@
# Add a project to conduct the test
project = data_utils.rand_name('test_project_')
description = data_utils.rand_name('desc_')
- project = self.identity_utils.create_project(name=project,
- description=description)
+ project = identity.identity_utils(self.os_admin).create_project(
+ name=project, description=description)
project_id = project['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
# Change quotas for project
quota_set = self.admin_quotas_client.update_quotas(
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index 07c4157..8cdb41e 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -16,10 +16,11 @@
import testtools
from tempest.api.network import base
+from tempest.common import identity
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -41,42 +42,23 @@
self.addCleanup(self._cleanup_router, router)
return router
- def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
- interface = self.routers_client.add_router_interface(
- router_id, subnet_id=subnet_id)
- self.addCleanup(self._remove_router_interface_with_subnet_id,
- router_id, subnet_id)
- self.assertEqual(subnet_id, interface['subnet_id'])
- return interface
-
- def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
- body = self.routers_client.remove_router_interface(router_id,
- subnet_id=subnet_id)
- self.assertEqual(subnet_id, body['subnet_id'])
-
@classmethod
def skip_checks(cls):
super(RoutersAdminTest, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
- @classmethod
- def resource_setup(cls):
- super(RoutersAdminTest, cls).resource_setup()
- cls.tenant_cidr = (CONF.network.project_network_cidr
- if cls._ip_version == 4 else
- CONF.network.project_network_v6_cidr)
-
@decorators.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
def test_create_router_setting_project_id(self):
# Test creating router from admin user setting project_id.
project = data_utils.rand_name('test_tenant_')
description = data_utils.rand_name('desc_')
- project = self.identity_utils.create_project(name=project,
- description=description)
+ project = identity.identity_utils(self.os_admin).create_project(
+ name=project, description=description)
project_id = project['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
name = data_utils.rand_name('router-')
create_body = self.admin_routers_client.create_router(
@@ -86,7 +68,7 @@
self.assertEqual(project_id, create_body['router']['tenant_id'])
@decorators.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_default_snat_value(self):
@@ -98,7 +80,7 @@
'enable_snat': True})
@decorators.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_snat_explicit(self):
@@ -160,7 +142,7 @@
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_with_snat_explicit(self):
@@ -177,7 +159,7 @@
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_without_snat(self):
@@ -209,7 +191,7 @@
self.assertFalse(list_body['ports'])
@decorators.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_reset_gateway_without_snat(self):
diff --git a/tempest/api/network/admin/test_routers_dvr.py b/tempest/api/network/admin/test_routers_dvr.py
index f9a0cfb..93478e6 100644
--- a/tempest/api/network/admin/test_routers_dvr.py
+++ b/tempest/api/network/admin/test_routers_dvr.py
@@ -16,17 +16,18 @@
import testtools
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class RoutersTestDVR(base.BaseAdminNetworkTest):
@classmethod
- def resource_setup(cls):
+ def skip_checks(cls):
+ super(RoutersTestDVR, cls).skip_checks()
for ext in ['router', 'dvr']:
- if not test.is_extension_enabled(ext, 'network'):
+ if not utils.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise cls.skipException(msg)
# The check above will pass if api_extensions=all, which does
@@ -35,6 +36,9 @@
# admin credentials to create router with distributed=True attribute
# and checking for BadRequest exception and that the resulting router
# has a distributed attribute.
+
+ @classmethod
+ def resource_setup(cls):
super(RoutersTestDVR, cls).resource_setup()
name = data_utils.rand_name('pretest-check')
router = cls.admin_routers_client.create_router(name=name)
@@ -83,7 +87,7 @@
self.assertFalse(router['router']['distributed'])
@decorators.idempotent_id('acd43596-c1fb-439d-ada8-31ad48ae3c2e')
- @testtools.skipUnless(test.is_extension_enabled('l3-ha', 'network'),
+ @testtools.skipUnless(utils.is_extension_enabled('l3-ha', 'network'),
'HA routers are not available.')
def test_centralized_router_update_to_dvr(self):
"""Test centralized router update
diff --git a/tempest/api/network/admin/test_routers_negative.py b/tempest/api/network/admin/test_routers_negative.py
index f350a15..9356bcc 100644
--- a/tempest/api/network/admin/test_routers_negative.py
+++ b/tempest/api/network/admin/test_routers_negative.py
@@ -16,10 +16,10 @@
import testtools
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -29,13 +29,13 @@
@classmethod
def skip_checks(cls):
super(RoutersAdminNegativeTest, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7101cc02-058a-11e7-93e1-fa163e4fa634')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_router_set_gateway_used_ip_returns_409(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 6bec0d7..bdfda0a 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -92,33 +92,17 @@
cls.subnets = []
cls.ports = []
cls.routers = []
- cls.floating_ips = []
- cls.metering_labels = []
- cls.metering_label_rules = []
cls.ethertype = "IPv" + str(cls._ip_version)
+ if cls._ip_version == 4:
+ cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ cls.mask_bits = CONF.network.project_network_mask_bits
+ elif cls._ip_version == 6:
+ cls.cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
+ cls.mask_bits = CONF.network.project_network_v6_mask_bits
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
- # Clean up floating IPs
- for floating_ip in cls.floating_ips:
- test_utils.call_and_ignore_notfound_exc(
- cls.floating_ips_client.delete_floatingip,
- floating_ip['id'])
-
- # Clean up metering label rules
- # Not all classes in the hierarchy have the client class variable
- if cls.metering_label_rules:
- label_rules_client = cls.admin_metering_label_rules_client
- for metering_label_rule in cls.metering_label_rules:
- test_utils.call_and_ignore_notfound_exc(
- label_rules_client.delete_metering_label_rule,
- metering_label_rule['id'])
- # Clean up metering labels
- for metering_label in cls.metering_labels:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_metering_labels_client.delete_metering_label,
- metering_label['id'])
# Clean up ports
for port in cls.ports:
test_utils.call_and_ignore_notfound_exc(
@@ -232,7 +216,9 @@
body = cls.floating_ips_client.create_floatingip(
floating_network_id=external_network_id)
fip = body['floatingip']
- cls.floating_ips.append(fip)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.floating_ips_client.delete_floatingip,
+ fip['id'])
return fip
@classmethod
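The new cls.cidr / cls.mask_bits attributes are what the later per-test subnet derivations build on. A minimal netaddr sketch with example values standing in for project_network_cidr / project_network_mask_bits:

    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/16')   # project_network_cidr (example)
    mask_bits = 28                              # project_network_mask_bits (example)
    # last /28 block of the project range, as used by
    # _create_subnet_with_last_subnet_block()
    subnet_cidr = list(cidr.subnet(mask_bits))[-1]
    gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)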
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index a90e4bf..3075047 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -13,13 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
import six
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -44,7 +43,7 @@
@classmethod
def skip_checks(cls):
super(AllowedAddressPairTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('allowed-address-pairs', 'network'):
+ if not utils.is_extension_enabled('allowed-address-pairs', 'network'):
msg = "Allowed Address Pairs extension not enabled."
raise cls.skipException(msg)
@@ -103,8 +102,7 @@
@decorators.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
def test_update_port_with_cidr_address_pair(self):
# Update allowed address pair with cidr
- cidr = str(netaddr.IPNetwork(CONF.network.project_network_cidr))
- self._update_port_with_address(cidr)
+ self._update_port_with_address(str(self.cidr))
@decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index 014d064..4804ada 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -15,8 +15,8 @@
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class ExtensionsTestJSON(base.BaseNetworkTest):
@@ -40,7 +40,7 @@
'allowed-address-pairs', 'extra_dhcp_opt',
'metering', 'dvr']
expected_alias = [ext for ext in expected_alias if
- test.is_extension_enabled(ext, 'network')]
+ utils.is_extension_enabled(ext, 'network')]
actual_alias = list()
extensions = self.network_extensions_client.list_extensions()
list_extensions = extensions['extensions']
@@ -66,5 +66,5 @@
# of extensions returned, but only for those that have been
# enabled via configuration
for e in expected_alias:
- if test.is_extension_enabled(e, 'network'):
+ if utils.is_extension_enabled(e, 'network'):
self.assertIn(e, actual_alias)
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index dc9042e..0d42033 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):
@@ -35,7 +35,7 @@
@classmethod
def skip_checks(cls):
super(ExtraDHCPOptionsTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
+ if not utils.is_extension_enabled('extra_dhcp_opt', 'network'):
msg = "Extra DHCP Options extension not enabled."
raise cls.skipException(msg)
@@ -75,7 +75,7 @@
def test_update_show_port_with_extra_dhcp_options(self):
# Update port with extra dhcp options
name = data_utils.rand_name('new-port-name')
- body = self.ports_client.update_port(
+ self.ports_client.update_port(
self.port['id'],
name=name,
extra_dhcp_opts=self.extra_dhcp_opts)
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index c799b15..ef4a23a 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -14,10 +14,10 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.common.utils import net_utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -43,7 +43,7 @@
@classmethod
def skip_checks(cls):
super(FloatingIPTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
if not CONF.network.public_network_id:
diff --git a/tempest/api/network/test_floating_ips_negative.py b/tempest/api/network/test_floating_ips_negative.py
index 5ca17fe..e904a81 100644
--- a/tempest/api/network/test_floating_ips_negative.py
+++ b/tempest/api/network/test_floating_ips_negative.py
@@ -15,10 +15,10 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -34,7 +34,7 @@
@classmethod
def skip_checks(cls):
super(FloatingIPNegativeTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
if not CONF.network.public_network_id:
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 269f2c2..1c59556 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -18,12 +18,12 @@
from tempest.api.network import base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -34,8 +34,7 @@
def resource_setup(cls):
super(BaseNetworkTestResources, cls).resource_setup()
cls.network = cls.create_network()
- cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
- cls._ip_version)
+ cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network)
cls._subnet_data = {6: {'gateway':
str(cls._get_gateway_from_tempest_conf(6)),
'allocation_pools':
@@ -64,20 +63,13 @@
'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
@classmethod
- def _create_subnet_with_last_subnet_block(cls, network, ip_version):
+ def _create_subnet_with_last_subnet_block(cls, network):
# Derive last subnet CIDR block from project CIDR and
# create the subnet with that derived CIDR
- if ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- mask_bits = CONF.network.project_network_mask_bits
- elif ip_version == 6:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- mask_bits = CONF.network.project_network_v6_mask_bits
-
- subnet_cidr = list(cidr.subnet(mask_bits))[-1]
+ subnet_cidr = list(cls.cidr.subnet(cls.mask_bits))[-1]
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
return cls.create_subnet(network, gateway=gateway_ip,
- cidr=subnet_cidr, mask_bits=mask_bits)
+ cidr=subnet_cidr, mask_bits=cls.mask_bits)
@classmethod
def _get_gateway_from_tempest_conf(cls, ip_version):
@@ -209,7 +201,7 @@
def test_show_network_fields(self):
# Verify specific fields of a network
fields = ['id', 'name']
- if test.is_extension_enabled('net-mtu', 'network'):
+ if utils.is_extension_enabled('net-mtu', 'network'):
fields.append('mtu')
body = self.networks_client.show_network(self.network['id'],
fields=fields)
@@ -233,7 +225,7 @@
def test_list_networks_fields(self):
# Verify specific fields of the networks
fields = ['id', 'name']
- if test.is_extension_enabled('net-mtu', 'network'):
+ if utils.is_extension_enabled('net-mtu', 'network'):
fields.append('mtu')
body = self.networks_client.list_networks(fields=fields)
networks = body['networks']
@@ -370,30 +362,44 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
- @test.requires_ext(extension='external-net', service='network')
+ @utils.requires_ext(extension='external-net', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_external_network_visibility(self):
- """Verifies user can see external networks but not subnets."""
+ public_network_id = CONF.network.public_network_id
+
+ # find external network matching public_network_id
body = self.networks_client.list_networks(**{'router:external': True})
- networks = [network['id'] for network in body['networks']]
- self.assertNotEmpty(networks, "No external networks found")
+ external_network = next((network for network in body['networks']
+ if network['id'] == public_network_id), None)
+ self.assertIsNotNone(external_network, "Public network %s not found "
+ "in external network list"
+ % public_network_id)
nonexternal = [net for net in body['networks'] if
not net['router:external']]
self.assertEmpty(nonexternal, "Found non-external networks"
" in filtered list (%s)." % nonexternal)
- self.assertIn(CONF.network.public_network_id, networks)
+
# only check the public network ID because the other networks may
# belong to other tests and their state may have changed during this
# test
- body = self.subnets_client.list_subnets(
- network_id=CONF.network.public_network_id)
- self.assertEmpty(body['subnets'], "Public subnets visible")
+ body = self.subnets_client.list_subnets(network_id=public_network_id)
+
+ # check subnet visibility of external_network
+ if external_network['shared']:
+ self.assertNotEmpty(body['subnets'], "Subnets should be visible "
+ "for shared public network %s"
+ % public_network_id)
+ else:
+ self.assertEmpty(body['subnets'], "Subnets should not be visible "
+ "for non-shared public "
+ "network %s"
+ % public_network_id)
@decorators.idempotent_id('c72c1c0c-2193-4aca-ccc4-b1442640bbbb')
- @test.requires_ext(extension="standard-attr-description",
- service="network")
+ @utils.requires_ext(extension="standard-attr-description",
+ service="network")
def test_create_update_network_description(self):
body = self.create_network(description='d1')
self.assertEqual('d1', body['description'])
@@ -473,14 +479,8 @@
def test_bulk_create_delete_subnet(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 subnets in one request
- if self._ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- mask_bits = CONF.network.project_network_mask_bits
- else:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- mask_bits = CONF.network.project_network_v6_mask_bits
-
- cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
+ cidrs = [subnet_cidr
+ for subnet_cidr in self.cidr.subnet(self.mask_bits)]
names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
subnets_list = []
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index f81927d..eb53fbb 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -18,11 +18,11 @@
from tempest.api.network import base_security_groups as sec_base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
-from tempest import test
CONF = config.CONF
@@ -84,25 +84,13 @@
self.assertTrue(port1['admin_state_up'])
self.assertTrue(port2['admin_state_up'])
- @classmethod
- def _get_ipaddress_from_tempest_conf(cls):
- """Return subnet with mask bits for configured CIDR """
- if cls._ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- cidr.prefixlen = CONF.network.project_network_mask_bits
-
- elif cls._ip_version == 6:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- cidr.prefixlen = CONF.network.project_network_v6_mask_bits
-
- return cidr
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
network = self.create_network()
net_id = network['id']
- address = self._get_ipaddress_from_tempest_conf()
+ address = self.cidr
+ address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
(address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
@@ -307,7 +295,7 @@
@decorators.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
@testtools.skipUnless(
- test.is_extension_enabled('security-group', 'network'),
+ utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_update_port_with_security_group_and_extra_attributes(self):
self._update_port_with_security_groups(
@@ -315,7 +303,7 @@
@decorators.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
@testtools.skipUnless(
- test.is_extension_enabled('security-group', 'network'),
+ utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_update_port_with_two_security_groups_and_extra_attributes(self):
self._update_port_with_security_groups(
@@ -342,7 +330,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
@testtools.skipUnless(
- test.is_extension_enabled('security-group', 'network'),
+ utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_create_port_with_no_securitygroups(self):
network = self.create_network()
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 128544b..abbb779 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -17,10 +17,10 @@
import testtools
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -55,26 +55,22 @@
@classmethod
def skip_checks(cls):
super(RoutersTest, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
- @classmethod
- def resource_setup(cls):
- super(RoutersTest, cls).resource_setup()
- cls.tenant_cidr = (CONF.network.project_network_cidr
- if cls._ip_version == 4 else
- CONF.network.project_network_v6_cidr)
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_show_list_update_delete_router(self):
# Create a router
+ name = data_utils.rand_name(self.__class__.__name__ + '-router')
router = self._create_router(
+ name=name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
+ self.assertEqual(router['name'], name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
router['external_gateway_info']['network_id'],
@@ -139,35 +135,8 @@
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
- def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
- show_body = self.admin_routers_client.show_router(router_id)
- actual_ext_gw_info = show_body['router']['external_gateway_info']
- if exp_ext_gw_info is None:
- self.assertIsNone(actual_ext_gw_info)
- return
- # Verify only keys passed in exp_ext_gw_info
- for k, v in exp_ext_gw_info.items():
- self.assertEqual(v, actual_ext_gw_info[k])
-
- def _verify_gateway_port(self, router_id):
- list_body = self.admin_ports_client.list_ports(
- network_id=CONF.network.public_network_id,
- device_id=router_id)
- self.assertEqual(len(list_body['ports']), 1)
- gw_port = list_body['ports'][0]
- fixed_ips = gw_port['fixed_ips']
- self.assertNotEmpty(fixed_ips)
- # Assert that all of the IPs from the router gateway port
- # are allocated from a valid public subnet.
- public_net_body = self.admin_networks_client.show_network(
- CONF.network.public_network_id)
- public_subnet_ids = public_net_body['network']['subnets']
- for fixed_ip in fixed_ips:
- subnet_id = fixed_ip['subnet_id']
- self.assertIn(subnet_id, public_subnet_ids)
-
@decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
- @test.requires_ext(extension='ext-gw-mode', service='network')
+ @utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@decorators.skip_because(bug='1676207')
@@ -198,11 +167,11 @@
fixed_ip['ip_address'])
@decorators.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
- @test.requires_ext(extension='extraroute', service='network')
+ @utils.requires_ext(extension='extraroute', service='network')
def test_update_delete_extra_route(self):
# Create different cidr for each subnet to avoid cidr duplicate
# The cidr starts from project_cidr
- next_cidr = netaddr.IPNetwork(self.tenant_cidr)
+ next_cidr = self.cidr
# Prepare to build several routes
test_routes = []
routes_num = 4
@@ -278,7 +247,7 @@
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
- sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+ sub02_cidr = self.cidr.next()
subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
router = self._create_router()
interface01 = self._add_router_interface_with_subnet_id(router['id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 72face8..c9ce55c 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -13,14 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
-
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -30,7 +28,7 @@
@classmethod
def skip_checks(cls):
super(RoutersNegativeTest, cls).skip_checks()
- if not test.is_extension_enabled('router', 'network'):
+ if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@@ -40,9 +38,6 @@
cls.router = cls.create_router()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
- cls.tenant_cidr = (CONF.network.project_network_cidr
- if cls._ip_version == 4 else
- CONF.network.project_network_v6_cidr)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
@@ -57,7 +52,7 @@
@decorators.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
def test_router_add_gateway_net_not_external_returns_400(self):
alt_network = self.create_network()
- sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+ sub_cidr = self.cidr.next()
self.create_subnet(alt_network, cidr=sub_cidr)
self.assertRaises(lib_exc.BadRequest,
self.routers_client.update_router,
@@ -124,17 +119,10 @@
@classmethod
def skip_checks(cls):
super(DvrRoutersNegativeTest, cls).skip_checks()
- if not test.is_extension_enabled('dvr', 'network'):
+ if not utils.is_extension_enabled('dvr', 'network'):
msg = "DVR extension not enabled."
raise cls.skipException(msg)
- @classmethod
- def resource_setup(cls):
- super(DvrRoutersNegativeTest, cls).resource_setup()
- cls.router = cls.create_router()
- cls.network = cls.create_network()
- cls.subnet = cls.create_subnet(cls.network)
-
@decorators.attr(type=['negative'])
@decorators.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
def test_router_create_tenant_distributed_returns_forbidden(self):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index a121864..24bd8ea 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -14,21 +14,20 @@
# under the License.
from tempest.api.network import base_security_groups as base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
class SecGroupTest(base.BaseSecGroupTest):
- _project_network_cidr = CONF.network.project_network_cidr
@classmethod
def skip_checks(cls):
super(SecGroupTest, cls).skip_checks()
- if not test.is_extension_enabled('security-group', 'network'):
+ if not utils.is_extension_enabled('security-group', 'network'):
msg = "security-group extension not enabled."
raise cls.skipException(msg)
@@ -209,7 +208,7 @@
protocol = 'tcp'
port_range_min = 76
port_range_max = 77
- ip_prefix = self._project_network_cidr
+ ip_prefix = str(self.cidr)
self._create_verify_security_group_rule(sg_id, direction,
self.ethertype, protocol,
port_range_min,
@@ -238,4 +237,3 @@
class SecGroupIPv6Test(SecGroupTest):
_ip_version = 6
- _project_network_cidr = CONF.network.project_network_v6_cidr
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index f51fb33..d054865 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -14,22 +14,21 @@
# under the License.
from tempest.api.network import base_security_groups as base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
class NegativeSecGroupTest(base.BaseSecGroupTest):
- _project_network_cidr = CONF.network.project_network_cidr
@classmethod
def skip_checks(cls):
super(NegativeSecGroupTest, cls).skip_checks()
- if not test.is_extension_enabled('security-group', 'network'):
+ if not utils.is_extension_enabled('security-group', 'network'):
msg = "security-group extension not enabled."
raise cls.skipException(msg)
@@ -110,7 +109,7 @@
sg2_body, _ = self._create_security_group()
# Create rule specifying both remote_ip_prefix and remote_group_id
- prefix = self._project_network_cidr
+ prefix = str(self.cidr)
self.assertRaises(
lib_exc.BadRequest,
self.security_group_rules_client.create_security_group_rule,
@@ -225,7 +224,6 @@
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
_ip_version = 6
- _project_network_cidr = CONF.network.project_network_v6_cidr
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7607439c-af73-499e-bf64-f687fd12a842')
diff --git a/tempest/api/network/test_service_providers.py b/tempest/api/network/test_service_providers.py
index b90c81b..9ebcd89 100644
--- a/tempest/api/network/test_service_providers.py
+++ b/tempest/api/network/test_service_providers.py
@@ -13,15 +13,15 @@
import testtools
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class ServiceProvidersTest(base.BaseNetworkTest):
@decorators.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
@testtools.skipUnless(
- test.is_extension_enabled('service-type', 'network'),
+ utils.is_extension_enabled('service-type', 'network'),
'service-type extension not enabled.')
def test_service_providers_list(self):
body = self.service_providers_client.list_service_providers()
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 01d7db2..bfc2609 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -13,12 +13,12 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -42,7 +42,7 @@
@classmethod
def skip_checks(cls):
super(SubnetPoolsTestJSON, cls).skip_checks()
- if not test.is_extension_enabled('subnet_allocation', 'network'):
+ if not utils.is_extension_enabled('subnet_allocation', 'network'):
msg = "subnet_allocation extension not enabled."
raise cls.skipException(msg)
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index 567a462..85f6896 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -14,11 +14,11 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -40,7 +40,7 @@
@classmethod
def skip_checks(cls):
super(TagsTest, cls).skip_checks()
- if not test.is_extension_enabled('tag', 'network'):
+ if not utils.is_extension_enabled('tag', 'network'):
msg = "tag extension not enabled."
raise cls.skipException(msg)
@@ -115,7 +115,7 @@
@classmethod
def skip_checks(cls):
super(TagsExtTest, cls).skip_checks()
- if not test.is_extension_enabled('tag-ext', 'network'):
+ if not utils.is_extension_enabled('tag-ext', 'network'):
msg = "tag-ext extension not enabled."
raise cls.skipException(msg)
@@ -131,11 +131,8 @@
prefix = CONF.network.default_network
cls.subnetpool = cls.subnetpools_client.create_subnetpool(
name=subnetpool_name, prefixes=prefix)['subnetpool']
-
- @classmethod
- def resource_cleanup(cls):
- cls.subnetpools_client.delete_subnetpool(cls.subnetpool['id'])
- super(TagsExtTest, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.subnetpools_client.delete_subnetpool,
+ cls.subnetpool['id'])
def _create_tags_for_each_resource(self):
# Create a tag for each resource in `SUPPORTED_RESOURCES` and return
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 13614cb..ee72163 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -43,7 +43,7 @@
for cont in containers:
try:
params = {'limit': 9999, 'format': 'json'}
- _, objlist = container_client.list_container_contents(cont, params)
+ _, objlist = container_client.list_container_objects(cont, params)
# delete every object in the container
for obj in objlist:
test_utils.call_and_ignore_notfound_exc(
@@ -71,9 +71,6 @@
def setup_credentials(cls):
cls.set_network_resources()
super(BaseObjectTest, cls).setup_credentials()
- # credentials may be overwritten by children classes
- if hasattr(cls, 'os_roles_operator'):
- cls.os = cls.os_roles_operator
@classmethod
def setup_clients(cls):
@@ -98,7 +95,7 @@
cls.policies = None
if CONF.object_storage_feature_enabled.discoverability:
- _, body = cls.capabilities_client.list_capabilities()
+ body = cls.capabilities_client.list_capabilities()
if 'swift' in body and 'policies' in body['swift']:
cls.policies = body['swift']['policies']
@@ -109,7 +106,7 @@
def create_container(cls):
# wrapper that returns a test container
container_name = data_utils.rand_name(name='TestContainer')
- cls.container_client.create_container(container_name)
+ cls.container_client.update_container(container_name)
cls.containers.append(container_name)
return container_name
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index e765414..6599e43 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -17,8 +17,8 @@
from tempest.api.object_storage import base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class BulkTest(base.BaseObjectTest):
@@ -58,9 +58,9 @@
# upload an archived file
with open(filepath) as fh:
mydata = fh.read()
- resp, body = self.bulk_client.upload_archive(
+ resp = self.bulk_client.upload_archive(
upload_path='', data=mydata, archive_file_format='tar')
- return resp, body
+ return resp
def _check_contents_deleted(self, container_name):
param = {'format': 'txt'}
@@ -69,25 +69,24 @@
self.assertNotIn(container_name, body)
@decorators.idempotent_id('a407de51-1983-47cc-9f14-47c2b059413c')
- @test.requires_ext(extension='bulk_upload', service='object')
+ @utils.requires_ext(extension='bulk_upload', service='object')
def test_extract_archive(self):
# Test bulk operation of file upload with an archived file
filepath, container_name, object_name = self._create_archive()
- resp, _ = self._upload_archive(filepath)
-
+ resp = self._upload_archive(filepath)
self.containers.append(container_name)
# When uploading an archived file with the bulk operation, the response
# does not contain 'content-length' header. This is the special case,
# therefore the existence of response headers is checked without
# custom matcher.
- self.assertIn('transfer-encoding', resp)
- self.assertIn('content-type', resp)
- self.assertIn('x-trans-id', resp)
- self.assertIn('date', resp)
+ self.assertIn('transfer-encoding', resp.response)
+ self.assertIn('content-type', resp.response)
+ self.assertIn('x-trans-id', resp.response)
+ self.assertIn('date', resp.response)
# Check only the format of common headers with custom matcher
- self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+ self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
param = {'format': 'json'}
resp, body = self.account_client.list_account_containers(param)
@@ -97,7 +96,7 @@
self.assertIn(container_name, [b['name'] for b in body])
param = {'format': 'json'}
- resp, contents_list = self.container_client.list_container_contents(
+ resp, contents_list = self.container_client.list_container_objects(
container_name, param)
self.assertHeaders(resp, 'Container', 'GET')
@@ -105,32 +104,32 @@
self.assertIn(object_name, [c['name'] for c in contents_list])
@decorators.idempotent_id('c075e682-0d2a-43b2-808d-4116200d736d')
- @test.requires_ext(extension='bulk_delete', service='object')
+ @utils.requires_ext(extension='bulk_delete', service='object')
def test_bulk_delete(self):
# Test bulk operation of deleting multiple files
filepath, container_name, object_name = self._create_archive()
self._upload_archive(filepath)
data = '%s/%s\n%s' % (container_name, object_name, container_name)
- resp, _ = self.bulk_client.delete_bulk_data(data=data)
+ resp = self.bulk_client.delete_bulk_data(data=data)
# When deleting multiple files using the bulk operation, the response
# does not contain 'content-length' header. This is the special case,
# therefore the existence of response headers is checked without
# custom matcher.
- self.assertIn('transfer-encoding', resp)
- self.assertIn('content-type', resp)
- self.assertIn('x-trans-id', resp)
- self.assertIn('date', resp)
+ self.assertIn('transfer-encoding', resp.response)
+ self.assertIn('content-type', resp.response)
+ self.assertIn('x-trans-id', resp.response)
+ self.assertIn('date', resp.response)
# Check only the format of common headers with custom matcher
- self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+ self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
# Check if uploaded contents are completely deleted
self._check_contents_deleted(container_name)
@decorators.idempotent_id('dbea2bcb-efbb-4674-ac8a-a5a0e33d1d79')
- @test.requires_ext(extension='bulk_delete', service='object')
+ @utils.requires_ext(extension='bulk_delete', service='object')
def test_bulk_delete_by_POST(self):
# Test bulk operation of deleting multiple files
filepath, container_name, object_name = self._create_archive()
@@ -138,19 +137,19 @@
data = '%s/%s\n%s' % (container_name, object_name, container_name)
- resp, _ = self.bulk_client.delete_bulk_data_with_post(data=data)
+ resp = self.bulk_client.delete_bulk_data_with_post(data=data)
# When deleting multiple files using the bulk operation, the response
# does not contain 'content-length' header. This is the special case,
# therefore the existence of response headers is checked without
# custom matcher.
- self.assertIn('transfer-encoding', resp)
- self.assertIn('content-type', resp)
- self.assertIn('x-trans-id', resp)
- self.assertIn('date', resp)
+ self.assertIn('transfer-encoding', resp.response)
+ self.assertIn('content-type', resp.response)
+ self.assertIn('x-trans-id', resp.response)
+ self.assertIn('date', resp.response)
# Check only the format of common headers with custom matcher
- self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+ self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
# Check if uploaded contents are completely deleted
self._check_contents_deleted(container_name)
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 092d369..48f42ec 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -13,10 +13,10 @@
# under the License.
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -29,7 +29,6 @@
@classmethod
def setup_credentials(cls):
super(AccountQuotasTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_reselleradmin = cls.os_roles_reseller
@classmethod
@@ -78,7 +77,7 @@
@decorators.attr(type="smoke")
@decorators.idempotent_id('a22ef352-a342-4587-8f47-3bbdb5b039c4')
- @test.requires_ext(extension='account_quotas', service='object')
+ @utils.requires_ext(extension='account_quotas', service='object')
def test_upload_valid_object(self):
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string()
@@ -89,7 +88,7 @@
@decorators.attr(type=["smoke"])
@decorators.idempotent_id('63f51f9f-5f1d-4fc6-b5be-d454d70949d6')
- @test.requires_ext(extension='account_quotas', service='object')
+ @utils.requires_ext(extension='account_quotas', service='object')
def test_admin_modify_quota(self):
"""Test ResellerAdmin can modify/remove the quota on a user's account
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 55a6c7a..798926b 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -13,10 +13,10 @@
# under the License.
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -29,13 +29,12 @@
@classmethod
def setup_credentials(cls):
super(AccountQuotasNegativeTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_reselleradmin = cls.os_roles_reseller
@classmethod
def resource_setup(cls):
super(AccountQuotasNegativeTest, cls).resource_setup()
- cls.container_name = cls.create_container()
+ cls.create_container()
# Retrieve a ResellerAdmin auth data and use it to set a quota
# on the client's account
@@ -77,7 +76,7 @@
@decorators.attr(type=["negative"])
@decorators.idempotent_id('d1dc5076-555e-4e6d-9697-28f1fe976324')
- @test.requires_ext(extension='account_quotas', service='object')
+ @utils.requires_ext(extension='account_quotas', service='object')
def test_user_modify_quota(self):
"""Test that a user cannot modify or remove a quota on its account."""
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 9e62046..d7c85a2 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -36,15 +36,14 @@
@classmethod
def setup_credentials(cls):
super(AccountTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_operator = cls.os_roles_operator_alt
@classmethod
def resource_setup(cls):
super(AccountTest, cls).resource_setup()
for i in range(ord('a'), ord('f') + 1):
- name = data_utils.rand_name(name='%s-' % chr(i))
- cls.container_client.create_container(name)
+ name = data_utils.rand_name(name='%s-' % six.int2byte(i))
+ cls.container_client.update_container(name)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
@@ -135,7 +134,7 @@
not CONF.object_storage_feature_enabled.discoverability,
'Discoverability function is disabled')
def test_list_extensions(self):
- resp, _ = self.capabilities_client.list_capabilities()
+ resp = self.capabilities_client.list_capabilities()
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index e98a4f5..3e664d7 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -28,7 +28,6 @@
@classmethod
def setup_credentials(cls):
super(AccountNegativeTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_operator = cls.os_roles_operator_alt
@decorators.attr(type=['negative'])
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index 4b66ebf..765bc6d 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -41,10 +41,11 @@
tenant_name = self.os_roles_operator_alt.credentials.tenant_name
username = self.os_roles_operator_alt.credentials.username
cont_headers = {'X-Container-Read': tenant_name + ':' + username}
+ container_client = self.os_roles_operator.container_client
resp_meta, _ = (
- self.os_roles_operator.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix=''))
+ container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
@@ -68,10 +69,11 @@
tenant_name = self.os_roles_operator_alt.credentials.tenant_name
username = self.os_roles_operator_alt.credentials.username
cont_headers = {'X-Container-Write': tenant_name + ':' + username}
+ container_client = self.os_roles_operator.container_client
resp_meta, _ = (
- self.os_roles_operator.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix=''))
+ container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# set alternative authentication data; cannot simply use the
# other object client.
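For reference, the ACL tests above grant access by writing a "tenant:user" entry into the X-Container-Read or X-Container-Write header. A minimal sketch of composing that value; the helper is hypothetical, and applying it would go through the create_update_or_delete_container_metadata() call used in this diff.

    def make_container_acl_headers(tenant_name, username, read=True):
        # Swift container ACLs take a "tenant:user" entry; the same format
        # is used for both the read and the write ACL in the tests above.
        acl = '%s:%s' % (tenant_name, username)
        header = 'X-Container-Read' if read else 'X-Container-Write'
        return {header: acl}

    assert make_container_acl_headers('demo', 'alt_user') == \
        {'X-Container-Read': 'demo:alt_user'}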
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 655626c..90b24b4 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -29,7 +29,6 @@
@classmethod
def setup_credentials(cls):
super(ObjectACLsNegativeTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_operator = cls.os_roles_operator_alt
@classmethod
@@ -40,7 +39,7 @@
def setUp(self):
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(self.container_name)
+ self.container_client.update_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
@@ -134,9 +133,10 @@
# attempt to read object using non-authorized user
# update X-Container-Read metadata ACL
cont_headers = {'X-Container-Read': 'badtenant:baduser'}
- resp_meta, _ = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ resp_meta, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
@@ -158,9 +158,10 @@
# attempt to write object using non-authorized user
# update X-Container-Write metadata ACL
cont_headers = {'X-Container-Write': 'badtenant:baduser'}
- resp_meta, _ = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ resp_meta, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without rights
self.object_client.auth_provider.set_alt_auth_data(
@@ -183,9 +184,10 @@
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
- resp_meta, _ = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ resp_meta, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without write rights
self.object_client.auth_provider.set_alt_auth_data(
@@ -208,9 +210,10 @@
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
- resp_meta, _ = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ resp_meta, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index 8266341..982c4a1 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -14,10 +14,10 @@
# under the License.
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
QUOTA_BYTES = 10
QUOTA_COUNT = 3
@@ -40,8 +40,8 @@
self.container_name = self.create_container()
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
- self.container_client.update_container_metadata(
- self.container_name, metadata)
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
@@ -49,7 +49,7 @@
super(ContainerQuotasTest, self).tearDown()
@decorators.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
- @test.requires_ext(extension='container_quotas', service='object')
+ @utils.requires_ext(extension='container_quotas', service='object')
@decorators.attr(type="smoke")
def test_upload_valid_object(self):
"""Attempts to uploads an object smaller than the bytes quota."""
@@ -66,7 +66,7 @@
self.assertEqual(nbefore + len(data), nafter)
@decorators.idempotent_id('22eeeb2b-3668-4160-baef-44790f65a5a0')
- @test.requires_ext(extension='container_quotas', service='object')
+ @utils.requires_ext(extension='container_quotas', service='object')
@decorators.attr(type="smoke")
def test_upload_large_object(self):
"""Attempts to upload an object larger than the bytes quota."""
@@ -83,7 +83,7 @@
self.assertEqual(nbefore, nafter)
@decorators.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
- @test.requires_ext(extension='container_quotas', service='object')
+ @utils.requires_ext(extension='container_quotas', service='object')
@decorators.attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 76fe8d4..cdc420e 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -27,7 +27,7 @@
@decorators.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
def test_create_container(self):
container_name = data_utils.rand_name(name='TestContainer')
- resp, _ = self.container_client.create_container(container_name)
+ resp, _ = self.container_client.update_container(container_name)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -35,20 +35,20 @@
def test_create_container_overwrite(self):
# overwrite container with the same name
container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(container_name)
+ self.container_client.update_container(container_name)
self.containers.append(container_name)
- resp, _ = self.container_client.create_container(container_name)
+ resp, _ = self.container_client.update_container(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@decorators.idempotent_id('c2ac4d59-d0f5-40d5-ba19-0635056d48cd')
def test_create_container_with_metadata_key(self):
# create container with the blank value of metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta': ''}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Container-Meta-test-container-meta': ''}
+ resp, _ = self.container_client.update_container(
container_name,
- metadata=metadata)
+ **headers)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -64,10 +64,10 @@
container_name = data_utils.rand_name(name='TestContainer')
# metadata name using underscores should be converted to hyphens
- metadata = {'test_container_meta': 'Meta1'}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Container-Meta-test_container_meta': 'Meta1'}
+ resp, _ = self.container_client.update_container(
container_name,
- metadata=metadata)
+ **headers)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -75,22 +75,20 @@
container_name)
self.assertIn('x-container-meta-test-container-meta', resp)
self.assertEqual(resp['x-container-meta-test-container-meta'],
- metadata['test_container_meta'])
+ headers['X-Container-Meta-test_container_meta'])
@decorators.idempotent_id('24d16451-1c0c-4e4f-b59c-9840a3aba40e')
def test_create_container_with_remove_metadata_key(self):
# create container with the blank value of remove metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata_1 = {'test-container-meta': 'Meta1'}
- self.container_client.create_container(
- container_name,
- metadata=metadata_1)
+ headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
- metadata_2 = {'test-container-meta': ''}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Remove-Container-Meta-test-container-meta': ''}
+ resp, _ = self.container_client.update_container(
container_name,
- remove_metadata=metadata_2)
+ **headers)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
@@ -101,14 +99,13 @@
def test_create_container_with_remove_metadata_value(self):
# create container with remove metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
-
- resp, _ = self.container_client.create_container(
+ headers = {'X-Remove-Container-Meta-test-container-meta': 'Meta1'}
+ resp, _ = self.container_client.update_container(
container_name,
- remove_metadata=metadata)
+ **headers)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
@@ -130,7 +127,7 @@
container_name = self.create_container()
object_name, _ = self.create_object(container_name)
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual([object_name], object_list)
@@ -140,7 +137,7 @@
# get empty container contents list
container_name = self.create_container()
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEmpty(object_list)
@@ -153,7 +150,7 @@
self.create_object(container_name, object_name)
params = {'delimiter': '/'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -166,7 +163,7 @@
object_name, _ = self.create_object(container_name)
params = {'end_marker': object_name + 'zzzz'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -179,7 +176,7 @@
self.create_object(container_name)
params = {'format': 'json'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -198,7 +195,7 @@
self.create_object(container_name)
params = {'format': 'xml'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -222,7 +219,7 @@
object_name, _ = self.create_object(container_name)
params = {'limit': data_utils.rand_int_id(1, 10000)}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -235,7 +232,7 @@
object_name, _ = self.create_object(container_name)
params = {'marker': 'AaaaObject1234567890'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -250,7 +247,7 @@
self.create_object(container_name, object_name)
params = {'path': 'Swift'}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -264,7 +261,7 @@
prefix_key = object_name[0:8]
params = {'prefix': prefix_key}
- resp, object_list = self.container_client.list_container_contents(
+ resp, object_list = self.container_client.list_container_objects(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
@@ -277,9 +274,9 @@
container_name = self.create_container()
metadata = {'name': 'Pictures'}
- self.container_client.update_container_metadata(
+ self.container_client.create_update_or_delete_container_metadata(
container_name,
- metadata=metadata)
+ create_update_metadata=metadata)
resp, _ = self.container_client.list_container_metadata(
container_name)
@@ -301,16 +298,16 @@
def test_update_container_metadata_with_create_and_delete_metadata(self):
# Send one request of adding and deleting metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata_1 = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata_1)
+ metadata_1 = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **metadata_1)
self.containers.append(container_name)
metadata_2 = {'test-container-meta2': 'Meta2'}
- resp, _ = self.container_client.update_container_metadata(
- container_name,
- metadata=metadata_2,
- remove_metadata=metadata_1)
+ resp, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name,
+ create_update_metadata=metadata_2,
+ delete_metadata={'test-container-meta1': 'Meta1'}))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -326,9 +323,10 @@
container_name = self.create_container()
metadata = {'test-container-meta1': 'Meta1'}
- resp, _ = self.container_client.update_container_metadata(
- container_name,
- metadata=metadata)
+ resp, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name,
+ create_update_metadata=metadata))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -341,14 +339,14 @@
def test_update_container_metadata_with_delete_metadata(self):
# update container metadata using delete metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ metadata = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **metadata)
self.containers.append(container_name)
- resp, _ = self.container_client.delete_container_metadata(
- container_name,
- metadata=metadata)
+ resp, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name,
+ delete_metadata={'test-container-meta1': 'Meta1'}))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -361,9 +359,10 @@
container_name = self.create_container()
metadata = {'test-container-meta1': ''}
- resp, _ = self.container_client.update_container_metadata(
- container_name,
- metadata=metadata)
+ resp, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name,
+ create_update_metadata=metadata))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -374,15 +373,15 @@
def test_update_container_metadata_with_delete_metadata_key(self):
# update container metadata with a blank value of metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ headers = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
metadata = {'test-container-meta1': ''}
- resp, _ = self.container_client.delete_container_metadata(
- container_name,
- metadata=metadata)
+ resp, _ = (
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name,
+ delete_metadata=metadata))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(container_name)
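With update_container(), container metadata is now passed as raw header keyword arguments instead of a metadata= dict, so the X-Container-Meta- and X-Remove-Container-Meta- prefixes are spelled out by the test itself. A small self-contained sketch of that convention; the helper is hypothetical, only the prefixes come from the calls above.

    def container_meta_headers(metadata, remove=False):
        # Prefix each key the way the tests above do before calling
        # update_container(container_name, **headers).
        prefix = 'X-Remove-Container-Meta-' if remove else 'X-Container-Meta-'
        return {prefix + key: value for key, value in metadata.items()}

    assert container_meta_headers({'test-container-meta': 'Meta1'}) == \
        {'X-Container-Meta-test-container-meta': 'Meta1'}
    assert container_meta_headers({'test-container-meta': ''}, remove=True) == \
        {'X-Remove-Container-Meta-test-container-meta': ''}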
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index a8d70c5..b8c83b7 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -32,7 +32,7 @@
if CONF.object_storage_feature_enabled.discoverability:
# use /info to get default constraints
- _, body = cls.capabilities_client.list_capabilities()
+ body = cls.capabilities_client.list_capabilities()
cls.constraints = body['swift']
@decorators.attr(type=["negative"])
@@ -45,9 +45,10 @@
max_length = self.constraints['max_container_name_length']
# create a container with long name
container_name = data_utils.arbitrary_string(size=max_length + 1)
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name)
self.assertIn('Container name length of ' + str(max_length + 1) +
' longer than ' + str(max_length), str(ex))
@@ -61,11 +62,13 @@
# that is longer than max.
max_length = self.constraints['max_meta_name_length']
container_name = data_utils.rand_name(name='TestContainer')
- metadata_name = data_utils.arbitrary_string(size=max_length + 1)
+ metadata_name = 'X-Container-Meta-' + data_utils.arbitrary_string(
+ size=max_length + 1)
metadata = {metadata_name: 'penguin'}
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Metadata name too long', str(ex))
@decorators.attr(type=["negative"])
@@ -79,10 +82,11 @@
max_length = self.constraints['max_meta_value_length']
container_name = data_utils.rand_name(name='TestContainer')
metadata_value = data_utils.arbitrary_string(size=max_length + 1)
- metadata = {'animal': metadata_value}
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ metadata = {'X-Container-Meta-animal': metadata_value}
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Metadata value longer than ' + str(max_length), str(ex))
@decorators.attr(type=["negative"])
@@ -97,11 +101,12 @@
container_name = data_utils.rand_name(name='TestContainer')
metadata = {}
for i in range(max_count + 1):
- metadata['animal-' + str(i)] = 'penguin'
+ metadata['X-Container-Meta-animal-' + str(i)] = 'penguin'
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Too many metadata items; max ' + str(max_count),
str(ex))
@@ -120,9 +125,10 @@
# Attempts to update metadata using a nonexistent container name.
metadata = {'animal': 'penguin'}
- self.assertRaises(exceptions.NotFound,
- self.container_client.update_container_metadata,
- 'nonexistent_container_name', metadata)
+ self.assertRaises(
+ exceptions.NotFound,
+ self.container_client.create_update_or_delete_container_metadata,
+ 'nonexistent_container_name', create_update_metadata=metadata)
@decorators.attr(type=["negative"])
@decorators.idempotent_id('65387dbf-a0e2-4aac-9ddc-16eb3f1f69ba')
@@ -130,9 +136,10 @@
# Attempts to delete metadata using a nonexistent container name.
metadata = {'animal': 'penguin'}
- self.assertRaises(exceptions.NotFound,
- self.container_client.delete_container_metadata,
- 'nonexistent_container_name', metadata)
+ self.assertRaises(
+ exceptions.NotFound,
+ self.container_client.create_update_or_delete_container_metadata,
+ 'nonexistent_container_name', delete_metadata=metadata)
@decorators.attr(type=["negative"])
@decorators.idempotent_id('14331d21-1e81-420a-beea-19cb5e5207f5')
@@ -141,7 +148,7 @@
# that doesn't exist.
params = {'limit': 9999, 'format': 'json'}
self.assertRaises(exceptions.NotFound,
- self.container_client.list_container_contents,
+ self.container_client.list_container_objects,
'nonexistent_container_name', params)
@decorators.attr(type=["negative"])
@@ -155,7 +162,7 @@
self.assertHeaders(resp, 'Container', 'DELETE')
params = {'limit': 9999, 'format': 'json'}
self.assertRaises(exceptions.NotFound,
- self.container_client.list_container_contents,
+ self.container_client.list_container_objects,
container_name, params)
@decorators.attr(type=["negative"])
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 378061a..1243b83 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -14,10 +14,10 @@
from tempest.api.object_storage import base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class StaticWebTest(base.BaseObjectTest):
@@ -27,17 +27,17 @@
super(StaticWebTest, cls).resource_setup()
# This header should be posted on the container before every test
- cls.headers_public_read_acl = {'Read': '.r:*,.rlistings'}
+ headers_public_read_acl = {'Read': '.r:*,.rlistings'}
# Create test container and create one object in it
cls.container_name = cls.create_container()
cls.object_name, cls.object_data = cls.create_object(
cls.container_name)
- cls.container_client.update_container_metadata(
+ cls.container_client.create_update_or_delete_container_metadata(
cls.container_name,
- metadata=cls.headers_public_read_acl,
- metadata_prefix="X-Container-")
+ create_update_metadata=headers_public_read_acl,
+ create_update_metadata_prefix="X-Container-")
@classmethod
def resource_cleanup(cls):
@@ -45,12 +45,12 @@
super(StaticWebTest, cls).resource_cleanup()
@decorators.idempotent_id('c1f055ab-621d-4a6a-831f-846fcb578b8b')
- @test.requires_ext(extension='staticweb', service='object')
+ @utils.requires_ext(extension='staticweb', service='object')
def test_web_index(self):
headers = {'web-index': self.object_name}
- self.container_client.update_container_metadata(
- self.container_name, metadata=headers)
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=headers)
# Maintain original headers, no auth added
self.account_client.auth_provider.set_alt_auth_data(
@@ -68,20 +68,21 @@
self.assertEqual(body, self.object_data)
# clean up before exiting
- self.container_client.update_container_metadata(self.container_name,
- {'web-index': ""})
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name,
+ create_update_metadata={'web-index': ""})
_, body = self.container_client.list_container_metadata(
self.container_name)
self.assertNotIn('x-container-meta-web-index', body)
@decorators.idempotent_id('941814cf-db9e-4b21-8112-2b6d0af10ee5')
- @test.requires_ext(extension='staticweb', service='object')
+ @utils.requires_ext(extension='staticweb', service='object')
def test_web_listing(self):
headers = {'web-listings': 'true'}
- self.container_client.update_container_metadata(
- self.container_name, metadata=headers)
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=headers)
# test GET on http://account_url/container_name
# we should retrieve a listing of objects
@@ -100,21 +101,21 @@
self.assertIn(self.object_name, body.decode())
# clean up before exiting
- self.container_client.update_container_metadata(self.container_name,
- {'web-listings': ""})
-
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name,
+ create_update_metadata={'web-listings': ""})
_, body = self.container_client.list_container_metadata(
self.container_name)
self.assertNotIn('x-container-meta-web-listings', body)
@decorators.idempotent_id('bc37ec94-43c8-4990-842e-0e5e02fc8926')
- @test.requires_ext(extension='staticweb', service='object')
+ @utils.requires_ext(extension='staticweb', service='object')
def test_web_listing_css(self):
headers = {'web-listings': 'true',
'web-listings-css': 'listings.css'}
- self.container_client.update_container_metadata(
- self.container_name, metadata=headers)
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=headers)
# Maintain original headers, no auth added
self.account_client.auth_provider.set_alt_auth_data(
@@ -131,13 +132,13 @@
self.assertIn(css, body.decode())
@decorators.idempotent_id('f18b4bef-212e-45e7-b3ca-59af3a465f82')
- @test.requires_ext(extension='staticweb', service='object')
+ @utils.requires_ext(extension='staticweb', service='object')
def test_web_error(self):
headers = {'web-listings': 'true',
'web-error': self.object_name}
- self.container_client.update_container_metadata(
- self.container_name, metadata=headers)
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=headers)
# Create object to return when requested object not found
object_name_404 = "404" + self.object_name
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 4cb1914..042d288 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -41,7 +41,6 @@
@classmethod
def setup_credentials(cls):
super(ContainerSyncTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_alt = cls.os_roles_operator_alt
@classmethod
@@ -103,7 +102,7 @@
while self.attempts > 0:
object_lists = []
for c_client, cont in zip(cont_client, self.containers):
- resp, object_list = c_client.list_container_contents(
+ resp, object_list = c_client.list_container_objects(
cont, params=params)
object_lists.append(dict(
(obj['name'], obj) for obj in object_list))
diff --git a/tempest/api/object_storage/test_container_sync_middleware.py b/tempest/api/object_storage/test_container_sync_middleware.py
index 9eae138..e77b079 100644
--- a/tempest/api/object_storage/test_container_sync_middleware.py
+++ b/tempest/api/object_storage/test_container_sync_middleware.py
@@ -13,9 +13,9 @@
# under the License.
from tempest.api.object_storage import test_container_sync
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -39,7 +39,7 @@
@decorators.attr(type='slow')
@decorators.idempotent_id('ea4645a1-d147-4976-82f7-e5a7a3065f80')
- @test.requires_ext(extension='container_sync', service='object')
+ @utils.requires_ext(extension='container_sync', service='object')
def test_container_synchronization(self):
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index c47aa93..f61d9f8 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -14,8 +14,8 @@
from tempest.api.object_storage import base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest.lib import decorators
-from tempest import test
class CrossdomainTest(base.BaseObjectTest):
@@ -38,7 +38,7 @@
self.account_client.skip_path()
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
- @test.requires_ext(extension='crossdomain', service='object')
+ @utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
resp, body = self.account_client.get("crossdomain.xml", {})
body = body.decode()
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index ed1be90..86f7c8c 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -40,10 +40,10 @@
def _test_object_expiry(self, metadata):
# update object metadata
resp, _ = \
- self.object_client.update_object_metadata(self.container_name,
- self.object_name,
- metadata,
- metadata_prefix='')
+ self.object_client.create_or_update_object_metadata(
+ self.container_name,
+ self.object_name,
+ headers=metadata)
# verify object metadata
resp, _ = \
self.object_client.list_object_metadata(self.container_name,
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index 3a2233a..cd834bf 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -19,9 +19,9 @@
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class ObjectFormPostTest(base.BaseObjectTest):
@@ -108,7 +108,7 @@
return body, content_type
@decorators.idempotent_id('80fac02b-6e54-4f7b-be0d-a965b5cbef76')
- @test.requires_ext(extension='formpost', service='object')
+ @utils.requires_ext(extension='formpost', service='object')
def test_post_object_using_form(self):
body, content_type = self.get_multipart_form()
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index c56d91a..df6a0fd 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -19,10 +19,10 @@
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class ObjectFormPostNegativeTest(base.BaseObjectTest):
@@ -109,7 +109,7 @@
return body, content_type
@decorators.idempotent_id('d3fb3c4d-e627-48ce-9379-a1631f21336d')
- @test.requires_ext(extension='formpost', service='object')
+ @utils.requires_ext(extension='formpost', service='object')
@decorators.attr(type=['negative'])
def test_post_object_using_form_expired(self):
body, content_type = self.get_multipart_form(expires=1)
@@ -126,7 +126,7 @@
self.assertIn('FormPost: Form Expired', str(exc))
@decorators.idempotent_id('b277257f-113c-4499-b8d1-5fead79f7360')
- @test.requires_ext(extension='formpost', service='object')
+ @utils.requires_ext(extension='formpost', service='object')
@decorators.attr(type=['negative'])
def test_post_object_using_form_invalid_signature(self):
self.key = "Wrong"
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index b29a77f..acb578d 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -48,8 +48,9 @@
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
- self.object_client.create_object_segments(
- self.container_name, object_name, i, data_segments[i])
+ obj_name = "%s/%s" % (object_name, i)
+ self.object_client.create_object(
+ self.container_name, obj_name, data_segments[i])
return object_name, data_segments
@@ -184,12 +185,15 @@
# create object with transfer_encoding
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(1024)
- _, _, resp_headers = self.object_client.put_object_with_chunk(
- container=self.container_name,
- name=object_name,
- contents=data_utils.chunkify(data, 512)
- )
- self.assertHeaders(resp_headers, 'Object', 'PUT')
+ headers = {'Transfer-Encoding': 'chunked'}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data=data_utils.chunkify(data, 512),
+ headers=headers,
+ chunked=True)
+
+ self.assertHeaders(resp, 'Object', 'PUT')
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
@@ -325,11 +329,10 @@
object_name, _ = self.create_object(self.container_name)
metadata = {'X-Object-Meta-test-meta': 'Meta'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- metadata,
- metadata_prefix='')
+ headers=metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -350,11 +353,10 @@
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -375,11 +377,10 @@
update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
'X-Remove-Object-Meta-test-meta1': 'Meta1'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -403,11 +404,10 @@
metadata=None)
object_prefix = '%s/%s' % (self.container_name, object_name)
update_metadata = {'X-Object-Manifest': object_prefix}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -422,11 +422,10 @@
object_name, _ = self.create_object(self.container_name)
update_metadata = {'X-Object-Meta-test-meta': ''}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -447,11 +446,10 @@
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -728,8 +726,13 @@
dst_object_name,
dst_data)
# copy source object to destination
- resp, _ = self.object_client.copy_object_in_same_container(
- self.container_name, src_object_name, dst_object_name)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
+ str(src_object_name))
+ resp, body = self.object_client.create_object(self.container_name,
+ dst_object_name,
+ data=None,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check data
@@ -749,8 +752,14 @@
# change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
- resp, _ = self.object_client.copy_object_in_same_container(
- self.container_name, object_name, object_name, metadata)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
+ str(object_name))
+ resp, body = self.object_client.create_object(self.container_name,
+ object_name,
+ data=None,
+ metadata=metadata,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check the content type
@@ -786,12 +795,12 @@
def test_copy_object_across_containers(self):
# create a container to use as a source container
src_container_name = data_utils.rand_name(name='TestSourceContainer')
- self.container_client.create_container(src_container_name)
+ self.container_client.update_container(src_container_name)
self.containers.append(src_container_name)
# create a container to use as a destination container
dst_container_name = data_utils.rand_name(
name='TestDestinationContainer')
- self.container_client.create_container(dst_container_name)
+ self.container_client.update_container(dst_container_name)
self.containers.append(dst_container_name)
# create object in source container
object_name = data_utils.rand_name(name='Object')
@@ -801,16 +810,21 @@
# set object metadata
meta_key = data_utils.rand_name(name='test')
meta_value = data_utils.rand_name(name='MetaValue')
- orig_metadata = {meta_key: meta_value}
- resp, _ = self.object_client.update_object_metadata(src_container_name,
- object_name,
- orig_metadata)
+ orig_metadata = {'X-Object-Meta-' + meta_key: meta_value}
+ resp, _ = self.object_client.create_or_update_object_metadata(
+ src_container_name,
+ object_name,
+ headers=orig_metadata)
self.assertHeaders(resp, 'Object', 'POST')
# copy object from source container to destination container
- resp, _ = self.object_client.copy_object_across_containers(
- src_container_name, object_name, dst_container_name,
- object_name)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(src_container_name),
+ str(object_name))
+ resp, body = self.object_client.create_object(dst_container_name,
+ object_name,
+ data=None,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check if object is present in destination container
@@ -897,8 +911,9 @@
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
- resp, _ = self.object_client.create_object_segments(
- self.container_name, object_name, i, data_segments[i])
+ obj_name = "%s/%s" % (object_name, i)
+ resp, _ = self.object_client.create_object(
+ self.container_name, obj_name, data_segments[i])
# creating a manifest file
metadata = {'X-Object-Manifest': '%s/%s/'
% (self.container_name, object_name)}
@@ -906,8 +921,8 @@
object_name, data='')
self.assertHeaders(resp, 'Object', 'PUT')
- resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name, metadata, metadata_prefix='')
+ resp, _ = self.object_client.create_or_update_object_metadata(
+ self.container_name, object_name, headers=metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -967,18 +982,17 @@
@classmethod
def setup_credentials(cls):
super(PublicObjectTest, cls).setup_credentials()
- cls.os = cls.os_roles_operator
cls.os_alt = cls.os_roles_operator_alt
@classmethod
def setup_clients(cls):
super(PublicObjectTest, cls).setup_clients()
- cls.identity_client_alt = cls.os_alt.identity_client
+ cls.object_client_alt = cls.os_alt.object_client
def setUp(self):
super(PublicObjectTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(self.container_name)
+ self.container_client.update_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
@@ -991,8 +1005,11 @@
# update container metadata to make it publicly readable
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers, metadata_prefix='')
+ resp_meta, body = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name,
+ create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
@@ -1026,9 +1043,10 @@
# make container public-readable and access an object in it using
# another user's credentials
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ resp_meta, body = (
+ self.container_client.create_update_or_delete_container_metadata(
+ self.container_name, create_update_metadata=cont_headers,
+ create_update_metadata_prefix=''))
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
@@ -1047,7 +1065,7 @@
self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
# get auth token of alternative user
- alt_auth_data = self.identity_client_alt.auth_provider.auth_data
+ alt_auth_data = self.object_client_alt.auth_provider.auth_data
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=alt_auth_data
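Several tests in this file replace the removed helper methods (create_object_segments, copy_object_in_same_container, copy_object_across_containers) with plain create_object() calls plus hand-built names and headers. A minimal sketch of those conventions; the helpers are hypothetical and only the string formats are taken from the calls above, with the actual client calls left as comments.

    def segment_name(object_name, index):
        # Segments of a dynamic large object are named "<manifest>/<index>".
        return '%s/%s' % (object_name, index)

    def manifest_header(container_name, object_name):
        # The manifest object points at its segments via X-Object-Manifest.
        return {'X-Object-Manifest': '%s/%s/' % (container_name, object_name)}

    def copy_header(src_container, src_object):
        # Server-side copy is now a PUT with an X-Copy-From header.
        return {'X-Copy-From': '%s/%s' % (src_container, src_object)}

    assert segment_name('TestObject', 0) == 'TestObject/0'
    assert copy_header('src', 'obj') == {'X-Copy-From': 'src/obj'}
    # object_client.create_object(dst_container, dst_object, data=None,
    #                             headers=copy_header('src', 'obj'))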
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 894e42d..c66776e 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -18,10 +18,10 @@
from tempest.api.object_storage import base
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest import test
# Each segment, except for the final one, must be at least 1 megabyte
MIN_SEGMENT_SIZE = 1024 * 1024
@@ -107,7 +107,7 @@
self.assertHeaders(resp, 'Object', method)
@decorators.idempotent_id('2c3f24a6-36e8-4711-9aa2-800ee1fc7b5b')
- @test.requires_ext(extension='slo', service='object')
+ @utils.requires_ext(extension='slo', service='object')
def test_upload_manifest(self):
# create static large object from multipart manifest
manifest = self._create_manifest()
@@ -122,7 +122,7 @@
self._assertHeadersSLO(resp, 'PUT')
@decorators.idempotent_id('e69ad766-e1aa-44a2-bdd2-bf62c09c1456')
- @test.requires_ext(extension='slo', service='object')
+ @utils.requires_ext(extension='slo', service='object')
def test_list_large_object_metadata(self):
# list static large object metadata using multipart manifest
object_name = self._create_large_object()
@@ -134,7 +134,7 @@
self._assertHeadersSLO(resp, 'HEAD')
@decorators.idempotent_id('49bc49bc-dd1b-4c0f-904e-d9f10b830ee8')
- @test.requires_ext(extension='slo', service='object')
+ @utils.requires_ext(extension='slo', service='object')
def test_retrieve_large_object(self):
# list static large object using multipart manifest
object_name = self._create_large_object()
@@ -149,7 +149,7 @@
self.assertEqual(body, sum_data)
@decorators.idempotent_id('87b6dfa1-abe9-404d-8bf0-6c3751e6aa77')
- @test.requires_ext(extension='slo', service='object')
+ @utils.requires_ext(extension='slo', service='object')
def test_delete_large_object(self):
# delete static large object using multipart manifest
object_name = self._create_large_object()
@@ -172,6 +172,6 @@
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
- resp, body = self.container_client.list_container_contents(
+ resp, body = self.container_client.list_container_objects(
self.container_name)
self.assertEqual(int(resp['x-container-object-count']), 0)
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index 91bc677..b99f93a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -19,9 +19,9 @@
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
class ObjectTempUrlTest(base.BaseObjectTest):
@@ -88,7 +88,7 @@
return url
@decorators.idempotent_id('f91c96d4-1230-4bba-8eb9-84476d18d991')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_get_object_using_temp_url(self):
expires = self._get_expiry_date()
@@ -103,11 +103,11 @@
self.assertEqual(body, self.content)
# Testing a HEAD on this Temp URL
- resp, body = self.object_client.head(url)
+ resp, _ = self.object_client.head(url)
self.assertHeaders(resp, 'Object', 'HEAD')
@decorators.idempotent_id('671f9583-86bd-4128-a034-be282a68c5d8')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_get_object_using_temp_url_key_2(self):
key2 = 'Meta2-'
metadata = {'Temp-URL-Key-2': key2}
@@ -132,7 +132,7 @@
self.assertEqual(body, self.content)
@decorators.idempotent_id('9b08dade-3571-4152-8a4f-a4f2a873a735')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_put_object_using_temp_url(self):
new_data = data_utils.random_bytes(size=len(self.object_name))
@@ -142,11 +142,11 @@
expires, self.key)
# trying to put random data in the object using temp url
- resp, body = self.object_client.put(url, new_data, None)
+ resp, _ = self.object_client.put(url, new_data, None)
self.assertHeaders(resp, 'Object', 'PUT')
# Testing a HEAD on this Temp URL
- resp, body = self.object_client.head(url)
+ resp, _ = self.object_client.head(url)
self.assertHeaders(resp, 'Object', 'HEAD')
# Validate that the content of the object has been modified
@@ -158,7 +158,7 @@
self.assertEqual(body, new_data)
@decorators.idempotent_id('249a0111-5ad3-4534-86a7-1993d55f9185')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_head_object_using_temp_url(self):
expires = self._get_expiry_date()
@@ -172,7 +172,7 @@
self.assertHeaders(resp, 'Object', 'HEAD')
@decorators.idempotent_id('9d9cfd90-708b-465d-802c-e4a8090b823d')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_get_object_using_temp_url_with_inline_query_parameter(self):
expires = self._get_expiry_date()
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 3edaa86..17ae6c1 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -19,10 +19,10 @@
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
@@ -65,10 +65,10 @@
# create object
self.object_name = data_utils.rand_name(name='ObjectTemp')
- self.content = data_utils.arbitrary_string(size=len(self.object_name),
- base_text=self.object_name)
+ content = data_utils.arbitrary_string(size=len(self.object_name),
+ base_text=self.object_name)
self.object_client.create_object(self.container_name,
- self.object_name, self.content)
+ self.object_name, content)
def _get_expiry_date(self, expiration_time=1000):
return int(time.time() + expiration_time)
@@ -94,7 +94,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5a583aca-c804-41ba-9d9a-e7be132bdf0b')
- @test.requires_ext(extension='tempurl', service='object')
+ @utils.requires_ext(extension='tempurl', service='object')
def test_get_object_after_expiration_time(self):
expires = self._get_expiry_date(1)
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index dc0d179..51b0a1d 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -51,18 +51,16 @@
def test_versioned_container(self):
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
- resp, body = self.container_client.create_container(
- vers_container_name)
+ resp, _ = self.container_client.update_container(vers_container_name)
self.containers.append(vers_container_name)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
base_container_name = data_utils.rand_name(name='TestBaseContainer')
headers = {'X-versions-Location': vers_container_name}
- resp, body = self.container_client.create_container(
+ resp, _ = self.container_client.update_container(
base_container_name,
- metadata=headers,
- metadata_prefix='')
+ **headers)
self.containers.append(base_container_name)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(base_container_name, '0', '0',
@@ -76,20 +74,20 @@
data_2 = data_utils.random_bytes()
resp, _ = self.object_client.create_object(base_container_name,
object_name, data_2)
- resp, body = self.object_client.get_object(base_container_name,
- object_name)
+ _, body = self.object_client.get_object(base_container_name,
+ object_name)
self.assertEqual(body, data_2)
# delete object version 2
resp, _ = self.object_client.delete_object(base_container_name,
object_name)
self.assertContainer(base_container_name, '1', '1024',
vers_container_name)
- resp, body = self.object_client.get_object(base_container_name,
- object_name)
+ _, body = self.object_client.get_object(base_container_name,
+ object_name)
self.assertEqual(body, data_1)
# delete object version 1
- resp, _ = self.object_client.delete_object(base_container_name,
- object_name)
+ self.object_client.delete_object(base_container_name,
+ object_name)
# containers should be empty
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
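The versioning test above now enables versioning by passing the X-versions-Location header (header names are case-insensitive) directly to update_container(). A minimal sketch, assuming the container_client interface used in this diff; the client call is left as a comment.

    vers_container_name = 'TestVersionContainer'
    headers = {'X-versions-Location': vers_container_name}
    # resp, _ = container_client.update_container(base_container_name,
    #                                             **headers)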
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
new file mode 100644
index 0000000..0df5fbd
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+
+class GroupTypesTest(base.BaseVolumeAdminTest):
+ _api_version = 3
+ min_microversion = '3.11'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
+ def test_group_type_create_list_show(self):
+ # Create/list/show group type.
+ name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
+ description = data_utils.rand_name("group-type-description")
+ group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
+ params = {'name': name,
+ 'description': description,
+ 'group_specs': group_specs,
+ 'is_public': True}
+ body = self.create_group_type(**params)
+ self.assertIn('name', body)
+ err_msg = ("The created group_type %(var)s is not equal to the "
+ "requested %(var)s")
+ self.assertEqual(name, body['name'], err_msg % {"var": "name"})
+ self.assertEqual(description, body['description'],
+ err_msg % {"var": "description"})
+
+ group_list = (
+ self.admin_group_types_client.list_group_types()['group_types'])
+ self.assertIsInstance(group_list, list)
+ self.assertNotEmpty(group_list)
+
+ fetched_group_type = self.admin_group_types_client.show_group_type(
+ body['id'])['group_type']
+ for key in params.keys():
+ self.assertEqual(params[key], fetched_group_type[key],
+ '%s of the fetched group_type is different '
+ 'from the created group_type' % key)
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 8609bdb..6b53d85 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -17,24 +17,56 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
-class GroupsTest(base.BaseVolumeAdminTest):
+class BaseGroupsTest(base.BaseVolumeAdminTest):
+
+ def _delete_group(self, grp_id, delete_volumes=True):
+ self.groups_client.delete_group(grp_id, delete_volumes)
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp_id:
+ self.volumes_client.wait_for_resource_deletion(vol['id'])
+ self.groups_client.wait_for_resource_deletion(grp_id)
+
+ def _delete_group_snapshot(self, group_snapshot_id, grp_id):
+ self.group_snapshots_client.delete_group_snapshot(group_snapshot_id)
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for vol in vols:
+ for snap in snapshots:
+ if (vol['group_id'] == grp_id and
+ vol['id'] == snap['volume_id']):
+ self.snapshots_client.wait_for_resource_deletion(
+ snap['id'])
+ self.group_snapshots_client.wait_for_resource_deletion(
+ group_snapshot_id)
+
+ def _create_group(self, group_type, volume_type, grp_name=None):
+ if not grp_name:
+ grp_name = data_utils.rand_name('Group')
+ grp = self.groups_client.create_group(
+ group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp_name)['group']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_group, grp['id'])
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+ self.assertEqual(grp_name, grp['name'])
+ return grp
+
+
+class GroupsTest(BaseGroupsTest):
_api_version = 3
min_microversion = '3.14'
max_microversion = 'latest'
- def _delete_group(self, grp_id, delete_volumes=True):
- self.admin_groups_client.delete_group(grp_id, delete_volumes)
- vols = self.admin_volume_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp_id:
- self.admin_volume_client.wait_for_resource_deletion(vol['id'])
- self.admin_groups_client.wait_for_resource_deletion(grp_id)
-
@decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
def test_group_create_show_list_delete(self):
# Create volume type
@@ -45,21 +77,13 @@
# Create group
grp1_name = data_utils.rand_name('Group1')
- grp1 = self.admin_groups_client.create_group(
- group_type=group_type['id'],
- volume_types=[volume_type['id']],
- name=grp1_name)['group']
- waiters.wait_for_volume_resource_status(
- self.admin_groups_client, grp1['id'], 'available')
+ grp1 = self._create_group(group_type, volume_type,
+ grp_name=grp1_name)
grp1_id = grp1['id']
grp2_name = data_utils.rand_name('Group2')
- grp2 = self.admin_groups_client.create_group(
- group_type=group_type['id'],
- volume_types=[volume_type['id']],
- name=grp2_name)['group']
- waiters.wait_for_volume_resource_status(
- self.admin_groups_client, grp2['id'], 'available')
+ grp2 = self._create_group(group_type, volume_type,
+ grp_name=grp2_name)
grp2_id = grp2['id']
# Create volume
@@ -68,32 +92,32 @@
'volume_type': volume_type['id'],
'group_id': grp1['id'],
'size': CONF.volume.volume_size}
- vol1 = self.admin_volume_client.create_volume(**params)['volume']
+ vol1 = self.volumes_client.create_volume(**params)['volume']
self.assertEqual(grp1['id'], vol1['group_id'])
waiters.wait_for_volume_resource_status(
- self.admin_volume_client, vol1['id'], 'available')
+ self.volumes_client, vol1['id'], 'available')
vol1_id = vol1['id']
# Get a given group
- grp1 = self.admin_groups_client.show_group(grp1['id'])['group']
+ grp1 = self.groups_client.show_group(grp1['id'])['group']
self.assertEqual(grp1_name, grp1['name'])
self.assertEqual(grp1_id, grp1['id'])
- grp2 = self.admin_groups_client.show_group(grp2['id'])['group']
+ grp2 = self.groups_client.show_group(grp2['id'])['group']
self.assertEqual(grp2_name, grp2['name'])
self.assertEqual(grp2_id, grp2['id'])
# Get all groups with detail
- grps = self.admin_groups_client.list_groups(
- detail=True)['groups']
- filtered_grps = [g for g in grps if g['id'] in [grp1_id, grp2_id]]
- self.assertEqual(2, len(filtered_grps))
- for grp in filtered_grps:
- self.assertEqual([volume_type['id']], grp['volume_types'])
- self.assertEqual(group_type['id'], grp['group_type'])
+ grps = self.groups_client.list_groups(detail=True)['groups']
+ for grp_id in [grp1_id, grp2_id]:
+ filtered_grps = [g for g in grps if g['id'] == grp_id]
+ self.assertEqual(1, len(filtered_grps))
+ self.assertEqual([volume_type['id']],
+ filtered_grps[0]['volume_types'])
+ self.assertEqual(group_type['id'],
+ filtered_grps[0]['group_type'])
- vols = self.admin_volume_client.list_volumes(
- detail=True)['volumes']
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
filtered_vols = [v for v in vols if v['id'] in [vol1_id]]
self.assertEqual(1, len(filtered_vols))
for vol in filtered_vols:
@@ -104,6 +128,266 @@
self._delete_group(grp1_id)
# grp2 is empty so delete_volumes flag can be set to False
self._delete_group(grp2_id, delete_volumes=False)
- grps = self.admin_groups_client.list_groups(
- detail=True)['groups']
+ grps = self.groups_client.list_groups(detail=True)['groups']
self.assertEmpty(grps)
+
+ @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
+ def test_group_snapshot_create_show_list_delete(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ grp = self._create_group(group_type, volume_type)
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = (
+ self.group_snapshots_client.create_group_snapshot(
+ group_id=grp['id'],
+ name=group_snapshot_name)['group_snapshot'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_group_snapshot,
+ group_snapshot['id'], grp['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client,
+ group_snapshot['id'], 'available')
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get a given group snapshot
+ group_snapshot = self.group_snapshots_client.show_group_snapshot(
+ group_snapshot['id'])['group_snapshot']
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get all group snapshots with details, check some detail-specific
+ # elements, and look for the created group snapshot
+ group_snapshots = (self.group_snapshots_client.list_group_snapshots(
+ detail=True)['group_snapshots'])
+ for grp_snapshot in group_snapshots:
+ self.assertIn('created_at', grp_snapshot)
+ self.assertIn('group_id', grp_snapshot)
+ self.assertIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
+
+ # Delete group snapshot
+ self._delete_group_snapshot(group_snapshot['id'], grp['id'])
+ group_snapshots = (self.group_snapshots_client.list_group_snapshots()
+ ['group_snapshots'])
+ self.assertEmpty(group_snapshots)
+
+ @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
+ def test_create_group_from_group_snapshot(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self._create_group(group_type, volume_type)
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group_snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = (
+ self.group_snapshots_client.create_group_snapshot(
+ group_id=grp['id'],
+ name=group_snapshot_name)['group_snapshot'])
+ self.addCleanup(self._delete_group_snapshot,
+ group_snapshot['id'], grp['id'])
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], 'available')
+
+ # Create Group from Group snapshot
+ grp_name2 = data_utils.rand_name('Group_from_snap')
+ grp2 = self.groups_client.create_group_from_source(
+ group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
+ self.addCleanup(self._delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, vol['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp2['id'], 'available')
+
+ @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
+ def test_create_group_from_group(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self._create_group(group_type, volume_type)
+
+ # Create volume
+ self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
+
+ # Create Group from Group
+ grp_name2 = data_utils.rand_name('Group_from_grp')
+ grp2 = self.groups_client.create_group_from_source(
+ source_group_id=grp['id'], name=grp_name2)['group']
+ self.addCleanup(self._delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, vol['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp2['id'], 'available')
+
+ @decorators.idempotent_id('4a8a6fd2-8b3b-4641-8f54-6a6f99320006')
+ def test_group_update(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self._create_group(group_type, volume_type)
+
+ # Create volumes
+ grp_vols = []
+ for _ in range(2):
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+ grp_vols.append(vol)
+ vol2 = grp_vols[1]
+
+ # Remove a volume from group and update name and description
+ new_grp_name = 'new_group'
+ new_desc = 'This is a new group'
+ grp_params = {'name': new_grp_name,
+ 'description': new_desc,
+ 'remove_volumes': vol2['id']}
+ self.groups_client.update_group(grp['id'], **grp_params)
+
+ # Wait for group status to become available
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ # Get the updated Group
+ grp = self.groups_client.show_group(grp['id'])['group']
+ self.assertEqual(new_grp_name, grp['name'])
+ self.assertEqual(new_desc, grp['description'])
+
+ # Get volumes in the group
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ grp_vols = [v for v in vols if v['group_id'] == grp['id']]
+ self.assertEqual(1, len(grp_vols))
+
+ # Add a volume to the group
+ grp_params = {'add_volumes': vol2['id']}
+ self.groups_client.update_group(grp['id'], **grp_params)
+
+ # Wait for group status to become available
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ # Get volumes in the group
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ grp_vols = [v for v in vols if v['group_id'] == grp['id']]
+ self.assertEqual(2, len(grp_vols))
+
+
+class GroupsV319Test(BaseGroupsTest):
+ _api_version = 3
+ min_microversion = '3.19'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
+ def test_reset_group_snapshot_status(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ group = self._create_group(group_type, volume_type)
+
+ # Create volume
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=group['id'])
+
+ # Create group snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = (self.group_snapshots_client.create_group_snapshot(
+ group_id=group['id'], name=group_snapshot_name)['group_snapshot'])
+ self.addCleanup(self._delete_group_snapshot,
+ group_snapshot['id'], group['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if volume['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], 'available')
+
+ # Reset group snapshot status
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.group_snapshots_client,
+ group_snapshot['id'], 'available')
+ self.addCleanup(
+ self.admin_group_snapshots_client.reset_group_snapshot_status,
+ group_snapshot['id'], 'available')
+ for status in ['creating', 'available', 'error']:
+ self.admin_group_snapshots_client.reset_group_snapshot_status(
+ group_snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], status)
+
+
+class GroupsV320Test(BaseGroupsTest):
+ _api_version = 3
+ min_microversion = '3.20'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('b20c696b-0cbc-49a5-8b3a-b1fb9338f45c')
+ def test_reset_group_status(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ group = self._create_group(group_type, volume_type)
+
+ # Reset group status
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.groups_client, group['id'], 'available')
+ self.addCleanup(self.admin_groups_client.reset_group_status,
+ group['id'], 'available')
+ for status in ['creating', 'available', 'error']:
+ self.admin_groups_client.reset_group_status(group['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, group['id'], status)
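The reset-status tests above register the waiter cleanup before the reset cleanup on purpose: addCleanup callbacks run in LIFO order, so on teardown the status is first reset back to 'available' and only then waited on. A small self-contained illustration of that ordering (the class and test names are illustrative, not part of the change):

    import unittest

    class CleanupOrderDemo(unittest.TestCase):
        def test_lifo_order(self):
            order = []
            self.addCleanup(order.append, 'registered first, runs last')
            self.addCleanup(order.append, 'registered last, runs first')
            # Cleanups execute after the test in reverse order of registration,
            # so 'registered last, runs first' is appended to the list first.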
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 2db8010..c0891e4 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -66,8 +66,7 @@
params = {'name': vol_name, 'volume_type': type_name,
'size': CONF.volume.volume_size}
- cls.volume = cls.admin_volume_client.create_volume(
- **params)['volume']
+ cls.volume = cls.create_volume(**params)
if with_prefix:
cls.volume_id_list_with_prefix.append(cls.volume['id'])
else:
@@ -76,21 +75,6 @@
waiters.wait_for_volume_resource_status(cls.admin_volume_client,
cls.volume['id'], 'available')
- @classmethod
- def resource_cleanup(cls):
- # volumes deletion
- vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
- for volume_id in vid_prefix:
- cls.admin_volume_client.delete_volume(volume_id)
- cls.admin_volume_client.wait_for_resource_deletion(volume_id)
-
- vid_no_pre = getattr(cls, 'volume_id_list_without_prefix', [])
- for volume_id in vid_no_pre:
- cls.admin_volume_client.delete_volume(volume_id)
- cls.admin_volume_client.wait_for_resource_deletion(volume_id)
-
- super(VolumeMultiBackendTest, cls).resource_cleanup()
-
@decorators.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
def test_backend_name_reporting(self):
# get volume id which created by type without prefix
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index 471f39a..41849bc 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
@@ -43,6 +44,8 @@
snapshot_id = self.snapshot['id']
self.admin_snapshots_client.reset_snapshot_status(snapshot_id,
status)
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot_id, status)
super(SnapshotsActionsTest, self).tearDown()
def _create_reset_and_force_delete_temp_snapshot(self, status=None):
@@ -50,10 +53,11 @@
# and force delete temp snapshot
temp_snapshot = self.create_snapshot(volume_id=self.volume['id'])
if status:
- self.admin_snapshots_client.\
- reset_snapshot_status(temp_snapshot['id'], status)
- self.admin_snapshots_client.\
- force_delete_snapshot(temp_snapshot['id'])
+ self.admin_snapshots_client.reset_snapshot_status(
+ temp_snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, temp_snapshot['id'], status)
+ self.admin_snapshots_client.force_delete_snapshot(temp_snapshot['id'])
self.snapshots_client.wait_for_resource_deletion(temp_snapshot['id'])
def _get_progress_alias(self):
@@ -63,18 +67,19 @@
def test_reset_snapshot_status(self):
# Reset snapshot status to creating
status = 'creating'
- self.admin_snapshots_client.\
- reset_snapshot_status(self.snapshot['id'], status)
- snapshot_get = self.admin_snapshots_client.show_snapshot(
- self.snapshot['id'])['snapshot']
- self.assertEqual(status, snapshot_get['status'])
+ self.admin_snapshots_client.reset_snapshot_status(
+ self.snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ self.snapshot['id'], status)
@decorators.idempotent_id('41288afd-d463-485e-8f6e-4eea159413eb')
def test_update_snapshot_status(self):
# Reset snapshot status to creating
status = 'creating'
- self.admin_snapshots_client.\
- reset_snapshot_status(self.snapshot['id'], status)
+ self.admin_snapshots_client.reset_snapshot_status(
+ self.snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ self.snapshot['id'], status)
# Update snapshot status to error
progress = '80%'
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
old mode 100755
new mode 100644
diff --git a/tempest/api/volume/admin/test_volume_hosts.py b/tempest/api/volume/admin/test_volume_hosts.py
index e4ec442..ce0cbd2 100644
--- a/tempest/api/volume/admin/test_volume_hosts.py
+++ b/tempest/api/volume/admin/test_volume_hosts.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import random
-
from tempest.api.volume import base
from tempest.lib import decorators
@@ -42,20 +40,25 @@
"The count of volume hosts is < 2, "
"response of list hosts is: %s" % hosts)
- # Note(jeremyZ): Host in volume is always presented in two formats:
- # <host-name> or <host-name>@<driver-name>. Since Mitaka is EOL,
- # both formats can be chosen for test.
- host_names = [host['host_name'] for host in hosts]
- self.assertNotEmpty(host_names, "No available volume host is found, "
- "all hosts that found are: %s" % hosts)
+ # Note(jeremyZ): The show host API is to show volume usage info on the
+ # specified cinder-volume host. If the host does not run cinder-volume
+ # service, or the cinder-volume service is disabled on the host, the
+ # show host API should fail (return code: 404). The cinder-volume host
+ # is presented in format: <host-name>@<driver-name>.
+ c_vol_hosts = [host['host_name'] for host in hosts
+ if (host['service'] == 'cinder-volume'
+ and host['service-state'] == 'enabled')]
+ self.assertNotEmpty(c_vol_hosts,
+ "No available cinder-volume host is found, "
+ "all hosts that found are: %s" % hosts)
- # Choose a random host to get and check its elements
- host_details = self.admin_hosts_client.show_host(
- random.choice(host_names))['host']
- self.assertNotEmpty(host_details)
+ # Check each cinder-volume host.
host_detail_keys = ['project', 'volume_count', 'snapshot_count',
'host', 'total_volume_gb', 'total_snapshot_gb']
- for detail in host_details:
- self.assertIn('resource', detail)
- for key in host_detail_keys:
- self.assertIn(key, detail['resource'])
+ for host in c_vol_hosts:
+ host_details = self.admin_hosts_client.show_host(host)['host']
+ self.assertNotEmpty(host_details)
+ for detail in host_details:
+ self.assertIn('resource', detail)
+ for key in host_detail_keys:
+ self.assertIn(key, detail['resource'])
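The new filter keys off three fields of each entry returned by list_hosts(). An illustrative entry with made-up values; only the keys actually used by the test are taken from this change:

    example_host = {
        'host_name': 'controller@lvmdriver-1',  # <host-name>@<driver-name>
        'service': 'cinder-volume',
        'service-state': 'enabled',
    }
    # The test keeps only enabled cinder-volume hosts:
    keep = (example_host['service'] == 'cinder-volume'
            and example_host['service-state'] == 'enabled')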
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index f551575..75dca41 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -19,6 +19,7 @@
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -92,9 +93,10 @@
# Verify a new project's default quotas.
project_name = data_utils.rand_name('quota_class_tenant')
description = data_utils.rand_name('desc_')
- project_id = self.identity_utils.create_project(
+ project_id = identity.identity_utils(self.os_admin).create_project(
name=project_name, description=description)['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
default_quotas = self.admin_quotas_client.show_default_quota_set(
project_id)['quota_set']
self.assertThat(default_quotas.items(),
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index f358d7f..42bfcd6 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -13,12 +13,14 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups']
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use']
@@ -36,7 +38,7 @@
def setup_credentials(cls):
super(BaseVolumeQuotasAdminTestJSON, cls).setup_credentials()
cls.demo_tenant_id = cls.os_primary.credentials.tenant_id
- cls.alt_client = cls.os_alt.volumes_client
+ cls.alt_client = cls.os_alt.volumes_client_latest
@classmethod
def setup_clients(cls):
@@ -66,7 +68,9 @@
new_quota_set = {'gigabytes': 1009,
'volumes': 11,
'snapshots': 11,
- 'backups': 11}
+ 'backups': 11,
+ 'backup_gigabytes': 1009,
+ 'per_volume_gigabytes': 1009}
# Update limits for all quota resources
quota_set = self.admin_quotas_client.update_quota_set(
@@ -100,7 +104,7 @@
volume = self.create_volume()
self.addCleanup(self.delete_volume,
- self.admin_volume_client, volume['id'])
+ self.volumes_client, volume['id'])
new_quota_usage = self.admin_quotas_client.show_quota_set(
self.demo_tenant_id, params={'usage': True})['quota_set']
@@ -117,10 +121,11 @@
# Admin can delete the resource quota set for a project
project_name = data_utils.rand_name('quota_tenant')
description = data_utils.rand_name('desc_')
- project = self.identity_utils.create_project(project_name,
- description=description)
+ project = identity.identity_utils(self.os_admin).create_project(
+ project_name, description=description)
project_id = project['id']
- self.addCleanup(self.identity_utils.delete_project, project_id)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ project_id)
quota_set_default = self.admin_quotas_client.show_default_quota_set(
project_id)['quota_set']
volume_default = quota_set_default['volumes']
diff --git a/tempest/api/volume/admin/test_volume_retype_with_migration.py b/tempest/api/volume/admin/test_volume_retype_with_migration.py
index 94d5299..f0b3a4f 100644
--- a/tempest/api/volume/admin/test_volume_retype_with_migration.py
+++ b/tempest/api/volume/admin/test_volume_retype_with_migration.py
@@ -85,9 +85,7 @@
volume_source = self.admin_volume_client.show_volume(
self.src_vol['id'])['volume']
- # TODO(erlon): change this to volumes_client client after Bug
- # #1657806 is fixed
- self.admin_volume_client.retype_volume(
+ self.volumes_client.retype_volume(
self.src_vol['id'],
new_type=self.dst_vol_type['name'],
migration_policy='on-demand')
diff --git a/tempest/api/volume/admin/test_volume_type_access.py b/tempest/api/volume/admin/test_volume_type_access.py
index 297ab6e..e93bcb5 100644
--- a/tempest/api/volume/admin/test_volume_type_access.py
+++ b/tempest/api/volume/admin/test_volume_type_access.py
@@ -30,7 +30,7 @@
@classmethod
def setup_clients(cls):
super(VolumeTypesAccessTest, cls).setup_clients()
- cls.alt_client = cls.os_alt.volumes_client
+ cls.alt_client = cls.os_alt.volumes_client_latest
@decorators.idempotent_id('d4dd0027-835f-4554-a6e5-50903fb79184')
def test_volume_type_access_add(self):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index b5a2fb7..730acdf 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -46,14 +46,32 @@
self.volume_type['id'], extra_specs)['extra_specs']
self.assertEqual(extra_specs, body,
"Volume type extra spec incorrectly created")
+
+ # Only update an extra spec
spec_key = "spec2"
extra_spec = {spec_key: "val2"}
body = self.admin_volume_types_client.update_volume_type_extra_specs(
self.volume_type['id'], spec_key, extra_spec)
self.assertIn(spec_key, body)
+ self.assertEqual(extra_spec[spec_key], body[spec_key])
+ body = self.admin_volume_types_client.show_volume_type_extra_specs(
+ self.volume_type['id'], spec_key)
+ self.assertIn(spec_key, body)
self.assertEqual(extra_spec[spec_key], body[spec_key],
"Volume type extra spec incorrectly updated")
+ # Update an existing extra spec and create a new extra spec
+ extra_specs = {spec_key: "val3", "spec4": "val4"}
+ body = self.admin_volume_types_client.create_volume_type_extra_specs(
+ self.volume_type['id'], extra_specs)['extra_specs']
+ self.assertEqual(extra_specs, body)
+ body = self.admin_volume_types_client.list_volume_types_extra_specs(
+ self.volume_type['id'])['extra_specs']
+ for key in extra_specs:
+ self.assertIn(key, body)
+ self.assertEqual(extra_specs[key], body[key],
+ "Volume type extra spec incorrectly created")
+
@decorators.idempotent_id('d4772798-601f-408a-b2a5-29e8a59d1220')
def test_volume_type_extra_spec_create_get_delete(self):
# Create/Get/Delete volume type extra spec.
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 4fa934e..fe249d6 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -61,7 +61,7 @@
@decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
def test_update_multiple_extra_spec(self):
# Should not update volume type extra specs with multiple specs as
- # body.
+ # body.
extra_spec = {"spec1": "val2", "spec2": "val1"}
self.assertRaises(
lib_exc.BadRequest,
@@ -73,7 +73,7 @@
@decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
def test_create_nonexistent_type_id(self):
# Should not create volume type extra spec for nonexistent volume
- # type id.
+ # type id.
extra_specs = {"spec2": "val1"}
self.assertRaises(
lib_exc.NotFound,
@@ -128,10 +128,10 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
- def test_get_nonexistent_extra_spec_id(self):
+ def test_get_nonexistent_extra_spec_name(self):
# Should not get volume type extra spec for nonexistent extra spec
- # id.
+ # name.
self.assertRaises(
lib_exc.NotFound,
self.admin_volume_types_client.show_volume_type_extra_specs,
- self.volume_type['id'], data_utils.rand_uuid())
+ self.volume_type['id'], "nonexistent_extra_spec_name")
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 4cad52a..ae29049 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -22,15 +22,6 @@
class VolumeTypesNegativeTest(base.BaseVolumeAdminTest):
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('b48c98f2-e662-4885-9b71-032256906314')
- def test_create_with_nonexistent_volume_type(self):
- # Should not be able to create volume with nonexistent volume_type.
- params = {'name': data_utils.rand_uuid(),
- 'volume_type': data_utils.rand_uuid()}
- self.assertRaises(lib_exc.NotFound,
- self.volumes_client.create_volume, **params)
-
- @decorators.attr(type=['negative'])
@decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
def test_create_with_empty_name(self):
# Should not be able to create volume type with an empty name.
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index b81a477..3e0deef 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -14,10 +14,10 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -30,21 +30,24 @@
if status:
self.admin_volume_client.reset_volume_status(
temp_volume['id'], status=status)
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, temp_volume['id'], status)
self.admin_volume_client.force_delete_volume(temp_volume['id'])
self.volumes_client.wait_for_resource_deletion(temp_volume['id'])
@decorators.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
def test_volume_reset_status(self):
- # test volume reset status : available->error->available
+ # test volume reset status : available->error->available->maintenance
volume = self.create_volume()
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume['id'], 'available')
self.addCleanup(self.admin_volume_client.reset_volume_status,
volume['id'], status='available')
for status in ['error', 'available', 'maintenance']:
self.admin_volume_client.reset_volume_status(
volume['id'], status=status)
- volume_get = self.admin_volume_client.show_volume(
- volume['id'])['volume']
- self.assertEqual(status, volume_get['status'])
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], status)
@decorators.idempotent_id('21737d5a-92f2-46d7-b009-a0cc0ee7a570')
def test_volume_force_delete_when_volume_is_creating(self):
@@ -67,7 +70,7 @@
self._create_reset_and_force_delete_temp_volume('maintenance')
@decorators.idempotent_id('d38285d9-929d-478f-96a5-00e66a115b81')
- @test.services('compute')
+ @utils.services('compute')
def test_force_detach_volume(self):
# Create a server and a volume
server_id = self.create_server()['id']
@@ -88,6 +91,8 @@
# Reset volume's status to error
self.admin_volume_client.reset_volume_status(volume_id, status='error')
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume_id, 'error')
# Force detach volume
self.admin_volume_client.force_detach_volume(
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index afc3281..375aacb 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -99,8 +99,7 @@
'available')
# Verify Import Backup
- backups = self.admin_backups_client.list_backups(
- detail=True)['backups']
+ backups = self.admin_backups_client.list_backups()['backups']
self.assertIn(new_id, [b['id'] for b in backups])
# Restore backup
diff --git a/tempest/api/volume/admin/test_volumes_list.py b/tempest/api/volume/admin/test_volumes_list.py
index 9d98b7a..6ce4a85 100644
--- a/tempest/api/volume/admin/test_volumes_list.py
+++ b/tempest/api/volume/admin/test_volumes_list.py
@@ -45,9 +45,9 @@
# Create a volume in admin tenant
adm_vol = self.admin_volume_client.create_volume(
size=CONF.volume.volume_size)['volume']
+ self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
waiters.wait_for_volume_resource_status(self.admin_volume_client,
adm_vol['id'], 'available')
- self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
params = {'all_tenants': 1,
'project_id': self.volumes_client.tenant_id}
# Getting volume list from primary tenant using admin credentials
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 394c453..63ef85b 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -69,19 +69,24 @@
if CONF.service_available.glance:
cls.images_client = cls.os_primary.image_client_v2
- cls.snapshots_client = cls.os_primary.snapshots_v2_client
- cls.volumes_client = cls.os_primary.volumes_v2_client
if cls._api_version == 3:
+ cls.backups_client = cls.os_primary.backups_v3_client
cls.volumes_client = cls.os_primary.volumes_v3_client
- cls.backups_client = cls.os_primary.backups_v2_client
+ cls.messages_client = cls.os_primary.volume_v3_messages_client
+ cls.versions_client = cls.os_primary.volume_v3_versions_client
+ cls.groups_client = cls.os_primary.groups_v3_client
+ cls.group_snapshots_client = (
+ cls.os_primary.group_snapshots_v3_client)
+ else:
+ cls.backups_client = cls.os_primary.backups_v2_client
+ cls.volumes_client = cls.os_primary.volumes_v2_client
+
+ cls.snapshots_client = cls.os_primary.snapshots_v2_client
cls.volumes_extension_client =\
cls.os_primary.volumes_v2_extension_client
cls.availability_zone_client = (
cls.os_primary.volume_v2_availability_zone_client)
cls.volume_limits_client = cls.os_primary.volume_v2_limits_client
- cls.messages_client = cls.os_primary.volume_v3_messages_client
- cls.versions_client = cls.os_primary.volume_v3_versions_client
- cls.groups_client = cls.os_primary.groups_v3_client
def setUp(self):
super(BaseVolumeTest, self).setUp()
@@ -255,6 +260,11 @@
cls.admin_volume_client = cls.os_admin.volumes_v2_client
if cls._api_version == 3:
cls.admin_volume_client = cls.os_admin.volumes_v3_client
+ cls.admin_groups_client = cls.os_admin.groups_v3_client
+ cls.admin_messages_client = cls.os_admin.volume_v3_messages_client
+ cls.admin_group_snapshots_client = \
+ cls.os_admin.group_snapshots_v3_client
+ cls.admin_group_types_client = cls.os_admin.group_types_v3_client
cls.admin_hosts_client = cls.os_admin.volume_hosts_v2_client
cls.admin_snapshot_manage_client = \
cls.os_admin.snapshot_manage_v2_client
@@ -270,9 +280,6 @@
cls.os_admin.volume_capabilities_v2_client
cls.admin_scheduler_stats_client = \
cls.os_admin.volume_scheduler_stats_v2_client
- cls.admin_messages_client = cls.os_admin.volume_v3_messages_client
- cls.admin_groups_client = cls.os_admin.groups_v3_client
- cls.admin_group_types_client = cls.os_admin.group_types_v3_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index d0a87db..0b6ee38 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -20,14 +20,10 @@
class AvailabilityZoneTestJSON(base.BaseVolumeTest):
"""Tests Availability Zone API List"""
- @classmethod
- def setup_clients(cls):
- super(AvailabilityZoneTestJSON, cls).setup_clients()
- cls.client = cls.availability_zone_client
-
@decorators.idempotent_id('01f1ae88-eba9-4c6b-a011-6f7ace06b725')
def test_get_availability_zone_list(self):
# List of availability zone
- availability_zone = (self.client.list_availability_zones()
- ['availabilityZoneInfo'])
+ availability_zone = (
+ self.availability_zone_client.list_availability_zones()
+ ['availabilityZoneInfo'])
self.assertNotEmpty(availability_zone)
diff --git a/tempest/api/volume/test_image_metadata.py b/tempest/api/volume/test_image_metadata.py
index 77baf18..53b3acc 100644
--- a/tempest/api/volume/test_image_metadata.py
+++ b/tempest/api/volume/test_image_metadata.py
@@ -16,9 +16,9 @@
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -39,8 +39,8 @@
cls.volume = cls.create_volume(imageRef=CONF.compute.image_ref)
@decorators.idempotent_id('03efff0b-5c75-4822-8f10-8789ac15b13e')
- @test.services('image')
- def test_update_image_metadata(self):
+ @utils.services('image')
+ def test_update_show_delete_image_metadata(self):
# Update image metadata
image_metadata = {'image_id': '5137a025-3c5f-43c1-bc64-5f41270040a5',
'image_name': 'image',
@@ -49,7 +49,7 @@
self.volumes_client.update_volume_image_metadata(self.volume['id'],
**image_metadata)
- # Fetch image metadata from the volume
+ # Fetch volume's image metadata by show_volume method
volume_image_metadata = self.volumes_client.show_volume(
self.volume['id'])['volume']['volume_image_metadata']
@@ -62,9 +62,9 @@
'ramdisk_id')
del image_metadata['ramdisk_id']
- # Fetch the new image metadata from the volume
- volume_image_metadata = self.volumes_client.show_volume(
- self.volume['id'])['volume']['volume_image_metadata']
+ # Fetch volume's image metadata by show_volume_image_metadata method
+ volume_image_metadata = self.volumes_client.show_volume_image_metadata(
+ self.volume['id'])['metadata']
# Verify image metadata was updated after item deletion
self.assertThat(volume_image_metadata.items(),
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index e54cd65..e6fe25d 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -81,7 +81,7 @@
self.assertNotIn("key3", body)
@decorators.idempotent_id('e8ff85c5-8f97-477f-806a-3ac364a949ed')
- def test_update_snapshot_metadata_item(self):
+ def test_update_show_snapshot_metadata_item(self):
# Update metadata item for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
@@ -101,6 +101,12 @@
body = self.snapshots_client.update_snapshot_metadata_item(
self.snapshot['id'], "key3", meta=update_item)['meta']
self.assertEqual(update_item, body)
+
+ # Get a specific metadata item of the snapshot
+ body = self.snapshots_client.show_snapshot_metadata_item(
+ self.snapshot['id'], "key3")['meta']
+ self.assertEqual({"key3": expect['key3']}, body)
+
# Get the metadata of the snapshot
body = self.snapshots_client.show_snapshot_metadata(
self.snapshot['id'])['metadata']
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index 0083a3b..b4d48db 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -26,4 +26,4 @@
# NOTE: The version data is checked on service client side
# with JSON-Schema validation. It is enough to just call
# the API here.
- self.versions_client.list_versions()['versions']
+ self.versions_client.list_versions()
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index 5e9a956..d203b2d 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -70,7 +70,7 @@
'Delete one item metadata of the volume failed')
@decorators.idempotent_id('862261c5-8df4-475a-8c21-946e50e36a20')
- def test_update_volume_metadata_item(self):
+ def test_update_show_volume_metadata_item(self):
# Update metadata item for the volume
metadata = {"key1": "value1",
"key2": "value2",
@@ -88,6 +88,12 @@
body = self.volumes_client.update_volume_metadata_item(
self.volume['id'], "key3", update_item)['meta']
self.assertEqual(update_item, body)
+
+ # Get a specific metadata item of the volume
+ body = self.volumes_client.show_volume_metadata_item(
+ self.volume['id'], "key3")['meta']
+ self.assertEqual({"key3": expect['key3']}, body)
+
# Get the metadata of the volume
body = self.volumes_client.show_volume_metadata(
self.volume['id'])['metadata']
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c4d10c3..be5638e 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -14,12 +14,12 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -35,7 +35,7 @@
@decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
@decorators.attr(type='smoke')
- @test.services('compute')
+ @utils.services('compute')
def test_attach_detach_volume_to_instance(self):
# Create a server
server = self.create_server()
@@ -66,7 +66,7 @@
fetched_volume['bootable'])
@decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
- @test.services('compute')
+ @utils.services('compute')
def test_get_volume_attachment(self):
# Create a server
server = self.create_server()
@@ -94,7 +94,7 @@
self.assertEqual(self.volume['id'], attachment['volume_id'])
@decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
- @test.services('image')
+ @utils.services('image')
def test_volume_upload(self):
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
@@ -112,6 +112,10 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
+ image_info = self.images_client.show_image(image_id)
+ self.assertEqual(image_name, image_info['name'])
+ self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])
+
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
# Mark volume as reserved.
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 4b4aeec..1e240b8 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -17,11 +17,11 @@
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -83,6 +83,9 @@
# Get all backups with detail
backups = self.backups_client.list_backups(
detail=True)['backups']
+ for backup_info in backups:
+ self.assertIn('created_at', backup_info)
+ self.assertIn('links', backup_info)
self.assertIn((backup['name'], backup['id']),
[(m['name'], m['id']) for m in backups])
@@ -97,7 +100,7 @@
matchers.ContainsAll(metadata.items()))
@decorators.idempotent_id('07af8f6d-80af-44c9-a5dc-c8427b1b62e6')
- @test.services('compute')
+ @utils.services('compute')
def test_backup_create_attached_volume(self):
"""Test backup create using force flag.
@@ -119,7 +122,7 @@
self.assertEqual(backup_name, backup['name'])
@decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
- @test.services('image')
+ @utils.services('image')
def test_bootable_volume_backup_and_restore(self):
# Create volume from image
img_uuid = CONF.compute.image_ref
@@ -140,3 +143,39 @@
restored_volume_id)['volume']
self.assertEqual('true', restored_volume_info['bootable'])
+
+
+class VolumesBackupsV39Test(base.BaseVolumeTest):
+
+ _api_version = 3
+ min_microversion = '3.9'
+ max_microversion = 'latest'
+
+ @classmethod
+ def skip_checks(cls):
+ super(VolumesBackupsV39Test, cls).skip_checks()
+ if not CONF.volume_feature_enabled.backup:
+ raise cls.skipException("Cinder backup feature disabled")
+
+ @decorators.idempotent_id('9b374cbc-be5f-4d37-8848-7efb8a873dcc')
+ def test_update_backup(self):
+ # Create volume and backup
+ volume = self.create_volume()
+ backup = self.create_backup(volume_id=volume['id'])
+
+ # Update backup and assert response body for update_backup method
+ update_kwargs = {
+ 'name': data_utils.rand_name(self.__class__.__name__ + '-Backup'),
+ 'description': data_utils.rand_name("volume-backup-description")
+ }
+ update_backup = self.backups_client.update_backup(
+ backup['id'], **update_kwargs)['backup']
+ self.assertEqual(backup['id'], update_backup['id'])
+ self.assertEqual(update_kwargs['name'], update_backup['name'])
+ self.assertIn('links', update_backup)
+
+ # Assert response body for show_backup method
+ retrieved_backup = self.backups_client.show_backup(
+ backup['id'])['backup']
+ for key in update_kwargs:
+ self.assertEqual(update_kwargs[key], retrieved_backup[key])
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index 4c13375..ea39a21 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -14,9 +14,9 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -30,6 +30,18 @@
if not CONF.volume_feature_enabled.clone:
raise cls.skipException("Cinder volume clones are disabled")
+ def _verify_volume_clone(self, source_volume, cloned_volume,
+ bootable='false', extra_size=0):
+
+ cloned_vol_details = self.volumes_client.show_volume(
+ cloned_volume['id'])['volume']
+
+ self.assertEqual(source_volume['id'],
+ cloned_vol_details['source_volid'])
+ self.assertEqual(source_volume['size'] + extra_size,
+ cloned_vol_details['size'])
+ self.assertEqual(bootable, cloned_vol_details['bootable'])
+
@decorators.idempotent_id('9adae371-a257-43a5-9555-dc7c88e66e0e')
def test_create_from_volume(self):
# Creates a volume from another volume passing a size different from
@@ -41,13 +53,10 @@
dst_vol = self.create_volume(source_volid=src_vol['id'],
size=src_size + 1)
- volume = self.volumes_client.show_volume(dst_vol['id'])['volume']
- # Should allow
- self.assertEqual(volume['source_volid'], src_vol['id'])
- self.assertEqual(volume['size'], src_size + 1)
+ self._verify_volume_clone(src_vol, dst_vol, extra_size=1)
@decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
- @test.services('image')
+ @utils.services('image')
def test_create_from_bootable_volume(self):
# Create volume from image
img_uuid = CONF.compute.image_ref
@@ -55,10 +64,5 @@
# Create a volume from the bootable volume
cloned_vol = self.create_volume(source_volid=src_vol['id'])
- cloned_vol_details = self.volumes_client.show_volume(
- cloned_vol['id'])['volume']
- # Verify cloned volume creation as expected
- self.assertEqual('true', cloned_vol_details['bootable'])
- self.assertEqual(src_vol['id'], cloned_vol_details['source_volid'])
- self.assertEqual(src_vol['size'], cloned_vol_details['size'])
+ self._verify_volume_clone(src_vol, cloned_vol, bootable='true')
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 1eb76a0..b73bdf2 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -13,12 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
import testtools
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -53,3 +57,130 @@
resized_volume = self.volumes_client.show_volume(
volume['id'])['volume']
self.assertEqual(extend_size, resized_volume['size'])
+
+
+class VolumesExtendAttachedTest(base.BaseVolumeTest):
+ """Tests extending the size of an attached volume."""
+
+ # We need admin credentials for getting instance action event details. By
+ # default a non-admin can list and show instance actions if they own the
+ # server instance, but since the event details can contain error messages
+ # and tracebacks, like an instance fault, those are not viewable by
+ # non-admins. This is obviously not a great user experience since the user
+ # may not know when the operation is actually complete. A microversion in
+ # the compute API will be added so that non-admins can see instance action
+ # events but will continue to hide the traceback field.
+ # TODO(mriedem): Change this to not rely on the admin user to get the event
+ # details once that microversion is available in Nova.
+ credentials = ['primary', 'admin']
+
+ _api_version = 3
+ # NOTE(mriedem): The minimum required volume API version is 3.42 and the
+ # minimum required compute API microversion is 2.51, but the compute call
+ # is implicit - Cinder calls Nova at that microversion, Tempest does not.
+ min_microversion = '3.42'
+
+ @classmethod
+ def setup_clients(cls):
+ super(VolumesExtendAttachedTest, cls).setup_clients()
+ cls.admin_servers_client = cls.os_admin.servers_client
+
+ def _find_extend_volume_instance_action(self, server_id):
+ actions = self.servers_client.list_instance_actions(
+ server_id)['instanceActions']
+ for action in actions:
+ if action['action'] == 'extend_volume':
+ return action
+
+ def _find_extend_volume_instance_action_finish_event(self, action):
+ # This has to be called by an admin client otherwise
+ # the events don't show up.
+ action = self.admin_servers_client.show_instance_action(
+ action['instance_uuid'], action['request_id'])['instanceAction']
+ for event in action['events']:
+ if (event['event'] == 'compute_extend_volume' and
+ event['finish_time']):
+ return event
+
+ @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
+ @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
+ "Attached volume extend is disabled.")
+ @utils.services('compute')
+ def test_extend_attached_volume(self):
+ """This is a happy path test which does the following:
+
+ * Create a volume at the configured volume_size.
+ * Create a server instance.
+ * Attach the volume to the server.
+ * Wait for the volume status to be "in-use".
+ * Extend the size of the volume and wait for the volume status to go
+ back to "in-use".
+ * Assert the volume size change is reflected in the volume API.
+ * Wait for the "compute_extend_volume" instance action event to show
+ up in the compute API with the success or failure status. We fail
+ if we timeout waiting for the instance action event to show up, or
+ if the action on the server fails.
+ """
+ # Create a test volume. Will be automatically cleaned up on teardown.
+ volume = self.create_volume()
+ # Create a test server. Will be automatically cleaned up on teardown.
+ server = self.create_server()
+ # Attach the volume to the server and wait for the volume status to be
+ # "in-use".
+ self.attach_volume(server['id'], volume['id'])
+ # Extend the size of the volume. If this is successful, the volume API
+ # will change the status on the volume to "extending" before doing an
+ # RPC cast to the volume manager on the backend. Note that we multiply
+ # the size of the volume since certain Cinder backends, e.g. ScaleIO,
+ # require multiples of 8GB.
+ extend_size = volume['size'] * 2
+ self.volumes_client.extend_volume(volume['id'], new_size=extend_size)
+ # The volume status should go back to in-use since it is still attached
+ # to the server instance.
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
+ # Assert that the volume size has changed in the volume API.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ self.assertEqual(extend_size, volume['size'])
+ # Now we wait for the "compute_extend_volume" instance action event
+ # to show up for the server instance. This is our indication that the
+ # asynchronous operation is complete on the compute side.
+ start_time = int(time.time())
+ timeout = self.servers_client.build_timeout
+ action = self._find_extend_volume_instance_action(server['id'])
+ while action is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ action = self._find_extend_volume_instance_action(server['id'])
+
+ if action is None:
+ msg = ("Timed out waiting to get 'extend_volume' instance action "
+ "record for server %(server)s after %(timeout)s seconds." %
+ {'server': server['id'], 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Now that we found the extend_volume instance action, we can wait for
+ # the compute_extend_volume instance action event to show up to
+ # indicate the operation is complete.
+ start_time = int(time.time())
+ event = self._find_extend_volume_instance_action_finish_event(action)
+ while event is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ event = self._find_extend_volume_instance_action_finish_event(
+ action)
+
+ if event is None:
+ msg = ("Timed out waiting to get 'compute_extend_volume' instance "
+ "action event record for server %(server)s and request "
+ "%(request_id)s after %(timeout)s seconds." %
+ {'server': server['id'],
+ 'request_id': action['request_id'],
+ 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Finally, assert that the action completed successfully.
+ self.assertTrue(
+ event['result'].lower() == 'success',
+ "Unexpected compute_extend_volume result '%(result)s' for request "
+ "%(request_id)s." %
+ {'result': event['result'],
+ 'request_id': action['request_id']})
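The new test repeats the same wait loop twice, once for the extend_volume instance action and once for its finish event. A minimal sketch of that poll-until-timeout pattern as a standalone helper; the helper name and parameters are illustrative, not part of the change:

    import time

    def poll_until(fetch, timeout, interval):
        # Call fetch() until it returns a non-None value or the timeout
        # elapses; returns the value, or None if the deadline passes.
        deadline = time.time() + timeout
        result = fetch()
        while result is None and time.time() < deadline:
            time.sleep(interval)
            result = fetch()
        return result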
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index ec9a0dd..71db95c 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -17,11 +17,11 @@
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from tempest import test
CONF = config.CONF
@@ -122,7 +122,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
- @test.services('image')
+ @utils.services('image')
def test_volume_create_get_update_delete_from_image(self):
image = self.images_client.show_image(CONF.compute.image_ref)
min_disk = image['min_disk']
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 8593d3a..b5f98ea 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -50,7 +50,7 @@
return
def str_vol(vol):
- return "%s:%s" % (vol['id'], vol[self.name])
+ return "%s:%s" % (vol['id'], vol['name'])
raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
self.fail(raw_msg % ([str_vol(v) for v in missing_vols],
@@ -60,7 +60,6 @@
@classmethod
def resource_setup(cls):
super(VolumesListTestJSON, cls).resource_setup()
- cls.name = cls.VOLUME_FIELDS[1]
existing_volumes = cls.volumes_client.list_volumes()['volumes']
cls.volume_id_list = [vol['id'] for vol in existing_volumes]
@@ -117,22 +116,20 @@
@decorators.idempotent_id('a28e8da4-0b56-472f-87a8-0f4d3f819c02')
def test_volume_list_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
- params = {self.name: volume[self.name]}
+ params = {'name': volume['name']}
fetched_vol = self.volumes_client.list_volumes(
params=params)['volumes']
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
- self.assertEqual(fetched_vol[0][self.name],
- volume[self.name])
+ self.assertEqual(fetched_vol[0]['name'], volume['name'])
@decorators.idempotent_id('2de3a6d4-12aa-403b-a8f2-fdeb42a89623')
def test_volume_list_details_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
- params = {self.name: volume[self.name]}
+ params = {'name': volume['name']}
fetched_vol = self.volumes_client.list_volumes(
detail=True, params=params)['volumes']
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
- self.assertEqual(fetched_vol[0][self.name],
- volume[self.name])
+ self.assertEqual(fetched_vol[0]['name'], volume['name'])
@decorators.idempotent_id('39654e13-734c-4dab-95ce-7613bf8407ce')
def test_volumes_list_by_status(self):
@@ -213,7 +210,7 @@
def test_volume_list_param_display_name_and_status(self):
# Test to list volume when display name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
- params = {self.name: volume[self.name],
+ params = {'name': volume['name'],
'status': 'available'}
self._list_by_param_value_and_assert(params)
@@ -221,7 +218,7 @@
def test_volume_list_with_detail_param_display_name_and_status(self):
# Test to list volume when name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
- params = {self.name: volume[self.name],
+ params = {'name': volume['name'],
'status': 'available'}
self._list_by_param_value_and_assert(params, with_detail=True)
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 4e19e62..f139283 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -16,13 +16,13 @@
import six
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -35,7 +35,6 @@
# Create a test shared instance and volume for attach/detach tests
cls.volume = cls.create_volume()
- cls.mountpoint = "/dev/vdc"
def create_image(self):
# Create image
@@ -168,7 +167,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6')
- @test.services('compute')
+ @utils.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
server = self.create_server()
@@ -176,7 +175,7 @@
self.volumes_client.attach_volume,
data_utils.rand_uuid(),
instance_uuid=server['id'],
- mountpoint=self.mountpoint)
+ mountpoint="/dev/vdc")
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
@@ -292,7 +291,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5b810c91-0ad1-47ce-aee8-615f789be78f')
- @test.services('image')
+ @utils.services('image')
def test_create_volume_from_image_with_decreasing_size(self):
# Create image
image = self.create_image()
@@ -307,7 +306,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d15e7f35-2cfc-48c8-9418-c8223a89bcbb')
- @test.services('image')
+ @utils.services('image')
def test_create_volume_from_deactivated_image(self):
# Create image
image = self.create_image()
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 44c1def..dcd3518 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -10,14 +10,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from tempest import test
CONF = config.CONF
@@ -36,7 +37,7 @@
cls.volume_origin = cls.create_volume()
@decorators.idempotent_id('8567b54c-4455-446d-a1cf-651ddeaa3ff2')
- @test.services('compute')
+ @utils.services('compute')
def test_snapshot_create_delete_with_volume_in_use(self):
# Create a test instance
server = self.create_server()
@@ -58,7 +59,7 @@
self.delete_snapshot(snapshot2['id'])
@decorators.idempotent_id('5210a1de-85a0-11e6-bb21-641c676a5d61')
- @test.services('compute')
+ @utils.services('compute')
def test_snapshot_create_offline_delete_online(self):
# Create a snapshot while it is not attached
@@ -149,3 +150,16 @@
# Should allow
self.assertEqual(volume['snapshot_id'], src_snap['id'])
self.assertEqual(volume['size'], src_size + 1)
+
+ @decorators.idempotent_id('bbcfa285-af7f-479e-8c1a-8c34fc16543c')
+ @testtools.skipUnless(CONF.volume_feature_enabled.backup,
+ "Cinder backup is disabled")
+ def test_snapshot_backup(self):
+ # Create a snapshot
+ snapshot = self.create_snapshot(volume_id=self.volume_origin['id'])
+
+ backup = self.create_backup(volume_id=self.volume_origin['id'],
+ snapshot_id=snapshot['id'])
+ backup_info = self.backups_client.show_backup(backup['id'])['backup']
+ self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
+ self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
diff --git a/tempest/clients.py b/tempest/clients.py
index c3357bb..ca205c8 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,24 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest import config
from tempest.lib import auth
from tempest.lib import exceptions as lib_exc
from tempest.lib.services import clients
-from tempest.services import object_storage
-from tempest.services import orchestration
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class Manager(clients.ServiceClients):
"""Top level manager for OpenStack tempest clients"""
- default_params = config.service_client_config()
-
def __init__(self, credentials, scope='project'):
"""Initialization of Manager class.
@@ -51,15 +44,10 @@
self._set_object_storage_clients()
self._set_image_clients()
self._set_network_clients()
-
- self.orchestration_client = orchestration.OrchestrationClient(
- self.auth_provider,
- CONF.orchestration.catalog_type,
- CONF.orchestration.region or CONF.identity.region,
- endpoint_type=CONF.orchestration.endpoint_type,
- build_interval=CONF.orchestration.build_interval,
- build_timeout=CONF.orchestration.build_timeout,
- **self.default_params)
+ # TODO(andreaf) This is maintained for backward compatibility
+ # with plugins, but it should removed eventually, since it was
+ # never a stable interface and it's not useful anyways
+ self.default_params = config.service_client_config()
def _set_network_clients(self):
self.network_agents_client = self.network.AgentsClient()
@@ -208,6 +196,7 @@
self.identity_v3.EndPointsFilterClient(**params_v3)
self.endpoint_groups_client = self.identity_v3.EndPointGroupsClient(
**params_v3)
+ self.catalog_client = self.identity_v3.CatalogClient(**params_v3)
# Token clients do not use the catalog. They only need default_params.
# They read auth_url, so they should only be set if the corresponding
@@ -229,65 +218,75 @@
def _set_volume_clients(self):
- self.volume_qos_client = self.volume_v1.QosSpecsClient()
- self.volume_qos_v2_client = self.volume_v2.QosSpecsClient()
- self.volume_services_client = self.volume_v1.ServicesClient()
- self.volume_services_v2_client = self.volume_v2.ServicesClient()
- self.backups_client = self.volume_v1.BackupsClient()
- self.backups_v2_client = self.volume_v2.BackupsClient()
- self.encryption_types_client = self.volume_v1.EncryptionTypesClient()
- self.encryption_types_v2_client = \
- self.volume_v2.EncryptionTypesClient()
- self.snapshot_manage_v2_client = self.volume_v2.SnapshotManageClient()
- self.snapshots_client = self.volume_v1.SnapshotsClient()
- self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
- self.volume_manage_v2_client = self.volume_v2.VolumeManageClient()
- self.volumes_client = self.volume_v1.VolumesClient()
- self.volumes_v2_client = self.volume_v2.VolumesClient()
- self.volumes_v3_client = self.volume_v3.VolumesClient()
- self.volume_v3_messages_client = self.volume_v3.MessagesClient()
- self.volume_v3_versions_client = self.volume_v3.VersionsClient()
- self.volume_types_client = self.volume_v1.TypesClient()
- self.volume_types_v2_client = self.volume_v2.TypesClient()
- self.volume_hosts_client = self.volume_v1.HostsClient()
- self.volume_hosts_v2_client = self.volume_v2.HostsClient()
- self.volume_quotas_client = self.volume_v1.QuotasClient()
- self.volume_quotas_v2_client = self.volume_v2.QuotasClient()
- self.volume_quota_classes_v2_client = \
- self.volume_v2.QuotaClassesClient()
- self.volumes_extension_client = self.volume_v1.ExtensionsClient()
- self.volumes_v2_extension_client = self.volume_v2.ExtensionsClient()
- self.groups_v3_client = self.volume_v3.GroupsClient()
- self.group_types_v3_client = self.volume_v3.GroupTypesClient()
- self.volume_availability_zone_client = \
- self.volume_v1.AvailabilityZoneClient()
- self.volume_v2_availability_zone_client = \
- self.volume_v2.AvailabilityZoneClient()
- self.volume_limits_client = self.volume_v1.LimitsClient()
- self.volume_v2_limits_client = self.volume_v2.LimitsClient()
- self.volume_capabilities_v2_client = \
- self.volume_v2.CapabilitiesClient()
- self.volume_scheduler_stats_v2_client = \
- self.volume_v2.SchedulerStatsClient()
- self.volume_transfers_v2_client = \
- self.volume_v2.TransfersClient()
+ if CONF.volume_feature_enabled.api_v1:
+ self.backups_client = self.volume_v1.BackupsClient()
+ self.encryption_types_client = \
+ self.volume_v1.EncryptionTypesClient()
+ self.snapshots_client = self.volume_v1.SnapshotsClient()
+ self.volume_availability_zone_client = \
+ self.volume_v1.AvailabilityZoneClient()
+ self.volume_hosts_client = self.volume_v1.HostsClient()
+ self.volume_limits_client = self.volume_v1.LimitsClient()
+ self.volume_qos_client = self.volume_v1.QosSpecsClient()
+ self.volume_quotas_client = self.volume_v1.QuotasClient()
+ self.volume_services_client = self.volume_v1.ServicesClient()
+ self.volume_types_client = self.volume_v1.TypesClient()
+ self.volumes_client = self.volume_v1.VolumesClient()
+ self.volumes_extension_client = self.volume_v1.ExtensionsClient()
+
+ if CONF.volume_feature_enabled.api_v2:
+ self.backups_v2_client = self.volume_v2.BackupsClient()
+ self.encryption_types_v2_client = \
+ self.volume_v2.EncryptionTypesClient()
+ self.snapshot_manage_v2_client = \
+ self.volume_v2.SnapshotManageClient()
+ self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
+ self.volume_capabilities_v2_client = \
+ self.volume_v2.CapabilitiesClient()
+ self.volume_manage_v2_client = self.volume_v2.VolumeManageClient()
+ self.volume_qos_v2_client = self.volume_v2.QosSpecsClient()
+ self.volume_services_v2_client = self.volume_v2.ServicesClient()
+ self.volume_types_v2_client = self.volume_v2.TypesClient()
+ self.volume_hosts_v2_client = self.volume_v2.HostsClient()
+ self.volume_quotas_v2_client = self.volume_v2.QuotasClient()
+ self.volume_quota_classes_v2_client = \
+ self.volume_v2.QuotaClassesClient()
+ self.volume_scheduler_stats_v2_client = \
+ self.volume_v2.SchedulerStatsClient()
+ self.volume_transfers_v2_client = \
+ self.volume_v2.TransfersClient()
+ self.volume_v2_availability_zone_client = \
+ self.volume_v2.AvailabilityZoneClient()
+ self.volume_v2_limits_client = self.volume_v2.LimitsClient()
+ self.volumes_v2_client = self.volume_v2.VolumesClient()
+ self.volumes_v2_extension_client = \
+ self.volume_v2.ExtensionsClient()
+
+ # Set default client for users that don't need explicit version
+ self.volumes_client_latest = self.volumes_v2_client
+ self.snapshots_client_latest = self.snapshots_v2_client
+
+ if CONF.volume_feature_enabled.api_v3:
+ self.backups_v3_client = self.volume_v3.BackupsClient()
+ self.group_types_v3_client = self.volume_v3.GroupTypesClient()
+ self.groups_v3_client = self.volume_v3.GroupsClient()
+ self.group_snapshots_v3_client = \
+ self.volume_v3.GroupSnapshotsClient()
+ self.snapshots_v3_client = self.volume_v3.SnapshotsClient()
+ self.volume_v3_messages_client = self.volume_v3.MessagesClient()
+ self.volume_v3_versions_client = self.volume_v3.VersionsClient()
+ self.volumes_v3_client = self.volume_v3.VolumesClient()
+
+ # Set default client for users that don't need explicit version
+ self.volumes_client_latest = self.volumes_v3_client
+ self.snapshots_client_latest = self.snapshots_v3_client
def _set_object_storage_clients(self):
- # NOTE(andreaf) Load configuration from config. Once object storage
- # is in lib, configuration will be pulled directly from the registry
- # and this will not be required anymore.
- params = config.service_client_config('object-storage')
-
- self.account_client = object_storage.AccountClient(self.auth_provider,
- **params)
- self.bulk_client = object_storage.BulkMiddlewareClient(
- self.auth_provider, **params)
- self.capabilities_client = object_storage.CapabilitiesClient(
- self.auth_provider, **params)
- self.container_client = object_storage.ContainerClient(
- self.auth_provider, **params)
- self.object_client = object_storage.ObjectClient(self.auth_provider,
- **params)
+ self.account_client = self.object_storage.AccountClient()
+ self.bulk_client = self.object_storage.BulkMiddlewareClient()
+ self.capabilities_client = self.object_storage.CapabilitiesClient()
+ self.container_client = self.object_storage.ContainerClient()
+ self.object_client = self.object_storage.ObjectClient()
def get_auth_provider_class(credentials):
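
With the volume clients now instantiated only for the API versions enabled in configuration, tests that do not care about the exact Block Storage version can rely on the *_client_latest aliases set above. A minimal usage sketch, assuming ``mgr`` is an initialized clients.Manager and at least volume v2 is enabled:

    # Resolves to the v3 clients when api_v3 is enabled, otherwise to v2.
    volume = mgr.volumes_client_latest.create_volume(size=1)['volume']
    snapshot = mgr.snapshots_client_latest.create_snapshot(
        volume_id=volume['id'])['snapshot']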
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 172d9e1..c2f8627 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -55,8 +55,15 @@
**-h**, **--help** (Optional) Shows help message with the description of
utility and its arguments, and exits.
-**c /etc/tempest.conf**, **--config-file /etc/tempest.conf** (Optional) Path to
-tempest config file.
+**-c /etc/tempest.conf**, **--config-file /etc/tempest.conf** (Optional) Path
+to tempest config file. If not specified, it searches for tempest.conf in these
+locations:
+
+- ./etc/
+- /etc/tempest
+- ~/.tempest/
+- ~/
+- /etc/
**--os-username <auth-user-name>** (Optional) Name used for authentication with
the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User should
@@ -78,7 +85,7 @@
will have the given TAG as a prefix in its name. Using a tag is recommended
for later steps, such as cleaning up resources.
-**-r CONCURRENCY**, **--concurrency CONCURRENCY** (Required) Concurrency count
+**-r CONCURRENCY**, **--concurrency CONCURRENCY** (Optional) Concurrency count
(default: 1). The number of accounts required can be estimated as
CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
a different tenant. This is required to provide isolation between test for
@@ -102,8 +109,8 @@
import yaml
from tempest.common import credentials_factory
-from tempest.common import dynamic_creds
from tempest import config
+from tempest.lib.common import dynamic_creds
LOG = None
@@ -141,18 +148,10 @@
admin_creds = credentials_factory.get_credentials(
fill_in=False, identity_version=identity_version, **admin_creds_dict)
return dynamic_creds.DynamicCredentialProvider(
- identity_version=identity_version,
name=opts.tag,
network_resources=network_resources,
- neutron_available=CONF.service_available.neutron,
- create_networks=CONF.auth.create_isolated_networks,
- identity_admin_role=CONF.identity.admin_role,
- identity_admin_domain_scope=CONF.identity.admin_domain_scope,
- project_network_cidr=CONF.network.project_network_cidr,
- project_network_mask_bits=CONF.network.project_network_mask_bits,
- public_network_id=CONF.network.public_network_id,
- admin_creds=admin_creds,
- **credentials_factory.get_dynamic_provider_params())
+ **credentials_factory.get_dynamic_provider_params(
+ identity_version, admin_creds=admin_creds))
def generate_resources(cred_provider, admin):
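
The account generator now delegates all configuration-derived arguments to credentials_factory.get_dynamic_provider_params. A sketch of the equivalent standalone call, assuming tempest.conf provides v3 admin credentials:

    from tempest.common import credentials_factory
    from tempest.lib.common import dynamic_creds

    provider = dynamic_creds.DynamicCredentialProvider(
        name='my-tag',
        network_resources=None,
        **credentials_factory.get_dynamic_provider_params('v3'))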
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index ac73cbf..d0aa7dc 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -54,17 +54,17 @@
not delete the projects themselves.
**--dry-run**: Creates a report (``./dry_run.json``) of the projects that will
-be cleaned up (in the ``_tenants_to_clean`` dictionary [1]_) and the global
+be cleaned up (in the ``_projects_to_clean`` dictionary [1]_) and the global
objects that will be removed (domains, flavors, images, roles, projects,
and users). Once the cleanup command is executed (e.g. run without
parameters), running it again with **--dry-run** should yield an empty report.
**--help**: Print the help text for the command and parameters.
-.. [1] The ``_tenants_to_clean`` dictionary in ``dry_run.json`` lists the
+.. [1] The ``_projects_to_clean`` dictionary in ``dry_run.json`` lists the
projects that ``tempest cleanup`` will loop through to delete child
objects, but the command will, by default, not delete the projects
- themselves. This may differ from the ``tenants`` list as you can clean
+ themselves. This may differ from the ``projects`` list as you can clean
the Tempest and alternate Tempest users and projects but they will not be
deleted unless the **--delete-tempest-conf-objects** flag is used to
force their deletion.
@@ -104,19 +104,20 @@
def init(self, parsed_args):
cleanup_service.init_conf()
self.options = parsed_args
- self.admin_mgr = credentials.AdminManager()
+ self.admin_mgr = clients.Manager(
+ credentials.get_configured_admin_credentials())
self.dry_run_data = {}
self.json_data = {}
self.admin_id = ""
self.admin_role_id = ""
- self.admin_tenant_id = ""
+ self.admin_project_id = ""
self._init_admin_ids()
self.admin_role_added = []
# available services
- self.tenant_services = cleanup_service.get_tenant_cleanup_services()
+ self.project_services = cleanup_service.get_project_cleanup_services()
self.global_services = cleanup_service.get_global_cleanup_services()
if parsed_args.init_saved_state:
@@ -132,24 +133,24 @@
is_save_state = False
if is_dry_run:
- self.dry_run_data["_tenants_to_clean"] = {}
+ self.dry_run_data["_projects_to_clean"] = {}
admin_mgr = self.admin_mgr
- # Always cleanup tempest and alt tempest tenants unless
+ # Always cleanup tempest and alt tempest projects unless
# they are in saved state json. Therefore is_preserve is False
kwargs = {'data': self.dry_run_data,
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': False,
'is_save_state': is_save_state}
- tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
- tenants = tenant_service.list()
- print("Process %s tenants" % len(tenants))
+ project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
+ projects = project_service.list()
+ print("Process %s projects" % len(projects))
- # Loop through list of tenants and clean them up.
- for tenant in tenants:
- self._add_admin(tenant['id'])
- self._clean_tenant(tenant)
+ # Loop through list of projects and clean them up.
+ for project in projects:
+ self._add_admin(project['id'])
+ self._clean_project(project)
kwargs = {'data': self.dry_run_data,
'is_dry_run': is_dry_run,
@@ -168,49 +169,51 @@
self._remove_admin_user_roles()
def _remove_admin_user_roles(self):
- tenant_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for tenants: %s",
- tenant_ids)
- for tenant_id in tenant_ids:
- self._remove_admin_role(tenant_id)
+ project_ids = self.admin_role_added
+ LOG.debug("Removing admin user roles where needed for projects: %s",
+ project_ids)
+ for project_id in project_ids:
+ self._remove_admin_role(project_id)
- def _clean_tenant(self, tenant):
- print("Cleaning tenant: %s " % tenant['name'])
+ def _clean_project(self, project):
+ print("Cleaning project: %s " % project['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
is_preserve = not self.options.delete_tempest_conf_objects
- tenant_id = tenant['id']
- tenant_name = tenant['name']
- tenant_data = None
+ project_id = project['id']
+ project_name = project['name']
+ project_data = None
if is_dry_run:
- tenant_data = dry_run_data["_tenants_to_clean"][tenant_id] = {}
- tenant_data['name'] = tenant_name
+ project_data = dry_run_data["_projects_to_clean"][project_id] = {}
+ project_data['name'] = project_name
kwargs = {"username": CONF.auth.admin_username,
"password": CONF.auth.admin_password,
- "tenant_name": tenant['name']}
+ "project_name": project['name']}
mgr = clients.Manager(credentials=credentials.get_credentials(
**kwargs))
- kwargs = {'data': tenant_data,
+ kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
'saved_state_json': None,
'is_preserve': is_preserve,
'is_save_state': False,
- 'tenant_id': tenant_id}
- for service in self.tenant_services:
+ 'project_id': project_id}
+ for service in self.project_services:
svc = service(mgr, **kwargs)
svc.run()
def _init_admin_ids(self):
- tn_cl = self.admin_mgr.tenants_client
- rl_cl = self.admin_mgr.roles_client
+ pr_cl = self.admin_mgr.projects_client
+ rl_cl = self.admin_mgr.roles_v3_client
+ rla_cl = self.admin_mgr.role_assignments_client
+ us_cl = self.admin_mgr.users_v3_client
- tenant = identity.get_tenant_by_name(tn_cl,
- CONF.auth.admin_project_name)
- self.admin_tenant_id = tenant['id']
-
- user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
- CONF.auth.admin_username)
+ project = identity.get_project_by_name(pr_cl,
+ CONF.auth.admin_project_name)
+ self.admin_project_id = project['id']
+ user = identity.get_user_by_project(us_cl, rla_cl,
+ self.admin_project_id,
+ CONF.auth.admin_username)
self.admin_id = user['id']
roles = rl_cl.list_roles()['roles']
@@ -235,7 +238,7 @@
dest='delete_tempest_conf_objects',
default=False,
help="Force deletion of the tempest and "
- "alternate tempest users and tenants.")
+ "alternate tempest users and projects.")
parser.add_argument('--dry-run', action="store_true",
dest='dry_run', default=False,
help="Generate JSON file:" + DRY_RUN_JSON +
@@ -246,43 +249,44 @@
def get_description(self):
return 'Cleanup after tempest run'
- def _add_admin(self, tenant_id):
- rl_cl = self.admin_mgr.roles_client
+ def _add_admin(self, project_id):
+ rl_cl = self.admin_mgr.roles_v3_client
needs_role = True
- roles = rl_cl.list_user_roles_on_project(tenant_id,
+ roles = rl_cl.list_user_roles_on_project(project_id,
self.admin_id)['roles']
for role in roles:
if role['id'] == self.admin_role_id:
needs_role = False
- LOG.debug("User already had admin privilege for this tenant")
+ LOG.debug("User already had admin privilege for this project")
if needs_role:
- LOG.debug("Adding admin privilege for : %s", tenant_id)
- rl_cl.create_user_role_on_project(tenant_id, self.admin_id,
+ LOG.debug("Adding admin privilege for : %s", project_id)
+ rl_cl.create_user_role_on_project(project_id, self.admin_id,
self.admin_role_id)
- self.admin_role_added.append(tenant_id)
+ self.admin_role_added.append(project_id)
- def _remove_admin_role(self, tenant_id):
- LOG.debug("Remove admin user role for tenant: %s", tenant_id)
- # Must initialize AdminManager for each user role
+ def _remove_admin_role(self, project_id):
+ LOG.debug("Remove admin user role for projectt: %s", project_id)
+ # Must initialize Admin Manager for each user role
# Otherwise authentication exception is thrown, weird
- id_cl = credentials.AdminManager().identity_client
- if (self._tenant_exists(tenant_id)):
+ id_cl = clients.Manager(
+ credentials.get_configured_admin_credentials()).identity_client
+ if (self._project_exists(project_id)):
try:
- id_cl.delete_role_from_user_on_project(tenant_id,
+ id_cl.delete_role_from_user_on_project(project_id,
self.admin_id,
self.admin_role_id)
except Exception as ex:
- LOG.exception("Failed removing role from tenant which still"
+ LOG.exception("Failed removing role from project which still"
"exists, exception: %s", ex)
- def _tenant_exists(self, tenant_id):
- tn_cl = self.admin_mgr.tenants_client
+ def _project_exists(self, project_id):
+ pr_cl = self.admin_mgr.projects_client
try:
- t = tn_cl.show_tenant(tenant_id)
- LOG.debug("Tenant is: %s", str(t))
+ p = pr_cl.show_project(project_id)
+ LOG.debug("Project is: %s", str(p))
return True
except Exception as ex:
- LOG.debug("Tenant no longer exists? %s", ex)
+ LOG.debug("Project no longer exists? %s", ex)
return False
def _init_state(self):
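
For reference, the --dry-run report built above stores one entry per project under _projects_to_clean, keyed by project ID and holding at least the project name plus whatever each per-project cleanup service records. A schematic of the structure (the ID and name are illustrative only):

    dry_run_data = {
        "_projects_to_clean": {
            "example-project-id": {
                "name": "tempest-project-1",
                # per-service entries added by each project cleanup service
            },
        },
    }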
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f1c0a3e..d1e80f1 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -16,11 +16,12 @@
from oslo_log import log as logging
+from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest.common import identity
+from tempest.common import utils
from tempest.common.utils import net_info
from tempest import config
-from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -31,7 +32,7 @@
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
-CONF_TENANTS = None
+CONF_PROJECTS = None
CONF_USERS = None
IS_CINDER = None
@@ -49,7 +50,7 @@
global CONF_PRIV_NETWORK_NAME
global CONF_PUB_NETWORK
global CONF_PUB_ROUTER
- global CONF_TENANTS
+ global CONF_PROJECTS
global CONF_USERS
global IS_CINDER
global IS_GLANCE
@@ -68,7 +69,7 @@
CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
CONF_PUB_NETWORK = CONF.network.public_network_id
CONF_PUB_ROUTER = CONF.network.public_router_id
- CONF_TENANTS = [CONF.auth.admin_project_name]
+ CONF_PROJECTS = [CONF.auth.admin_project_name]
CONF_USERS = [CONF.auth.admin_username]
if IS_NEUTRON:
@@ -78,16 +79,17 @@
def _get_network_id(net_name, project_name):
- am = credentials.AdminManager()
+ am = clients.Manager(
+ credentials.get_configured_admin_credentials())
net_cl = am.networks_client
- tn_cl = am.tenants_client
+ pr_cl = am.projects_client
networks = net_cl.list_networks()
- tenant = identity.get_tenant_by_name(tn_cl, project_name)
- t_id = tenant['id']
+ project = identity.get_project_by_name(pr_cl, project_name)
+ p_id = project['id']
n_id = None
for net in networks['networks']:
- if (net['tenant_id'] == t_id and net['name'] == net_name):
+ if (net['project_id'] == p_id and net['name'] == net_name):
n_id = net['id']
break
return n_id
@@ -139,7 +141,7 @@
def __init__(self, manager, **kwargs):
super(SnapshotService, self).__init__(kwargs)
- self.client = manager.snapshots_client
+ self.client = manager.snapshots_client_latest
def list(self):
client = self.client
@@ -213,7 +215,9 @@
class StackService(BaseService):
def __init__(self, manager, **kwargs):
super(StackService, self).__init__(kwargs)
- self.client = manager.orchestration_client
+ params = config.service_client_config('orchestration')
+ self.client = manager.orchestration.OrchestrationClient(
+ manager.auth_provider, **params)
def list(self):
client = self.client
@@ -315,7 +319,7 @@
class VolumeService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeService, self).__init__(kwargs)
- self.client = manager.volumes_client
+ self.client = manager.volumes_client_latest
def list(self):
client = self.client
@@ -340,7 +344,7 @@
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeQuotaService, self).__init__(kwargs)
- self.client = manager.volume_quotas_client
+ self.client = manager.volume_quotas_v2_client
def delete(self):
client = self.client
@@ -782,14 +786,14 @@
class IdentityService(BaseService):
def __init__(self, manager, **kwargs):
super(IdentityService, self).__init__(kwargs)
- self.client = manager.identity_client
+ self.client = manager.identity_v3_client
class UserService(BaseService):
def __init__(self, manager, **kwargs):
super(UserService, self).__init__(kwargs)
- self.client = manager.users_client
+ self.client = manager.users_v3_client
def list(self):
users = self.client.list_users()['users']
@@ -868,43 +872,43 @@
self.data['roles'][role['id']] = role['name']
-class TenantService(BaseService):
+class ProjectService(BaseService):
def __init__(self, manager, **kwargs):
- super(TenantService, self).__init__(kwargs)
- self.client = manager.tenants_client
+ super(ProjectService, self).__init__(kwargs)
+ self.client = manager.projects_client
def list(self):
- tenants = self.client.list_tenants()['tenants']
+ projects = self.client.list_projects()['projects']
if not self.is_save_state:
- tenants = [tenant for tenant in tenants if (tenant['id']
- not in self.saved_state_json['tenants'].keys()
- and tenant['name'] != CONF.auth.admin_project_name)]
+ projects = [project for project in projects if (project['id']
+ not in self.saved_state_json['projects'].keys()
+ and project['name'] != CONF.auth.admin_project_name)]
if self.is_preserve:
- tenants = [tenant for tenant in tenants if tenant['name']
- not in CONF_TENANTS]
+ projects = [project for project in projects if project['name']
+ not in CONF_PROJECTS]
- LOG.debug("List count, %s Tenants after reconcile", len(tenants))
- return tenants
+ LOG.debug("List count, %s Projects after reconcile", len(projects))
+ return projects
def delete(self):
- tenants = self.list()
- for tenant in tenants:
+ projects = self.list()
+ for project in projects:
try:
- self.client.delete_tenant(tenant['id'])
+ self.client.delete_project(project['id'])
except Exception:
- LOG.exception("Delete Tenant exception.")
+ LOG.exception("Delete project exception.")
def dry_run(self):
- tenants = self.list()
- self.data['tenants'] = tenants
+ projects = self.list()
+ self.data['projects'] = projects
def save_state(self):
- tenants = self.list()
- self.data['tenants'] = {}
- for tenant in tenants:
- self.data['tenants'][tenant['id']] = tenant['name']
+ projects = self.list()
+ self.data['projects'] = {}
+ for project in projects:
+ self.data['projects'][project['id']] = project['name']
class DomainService(BaseService):
@@ -944,35 +948,35 @@
self.data['domains'][domain['id']] = domain['name']
-def get_tenant_cleanup_services():
- tenant_services = []
+def get_project_cleanup_services():
+ project_services = []
# TODO(gmann): Tempest should provide some plugin hook for cleanup
# script extension to plugin tests also.
if IS_NOVA:
- tenant_services.append(ServerService)
- tenant_services.append(KeyPairService)
- tenant_services.append(SecurityGroupService)
- tenant_services.append(ServerGroupService)
+ project_services.append(ServerService)
+ project_services.append(KeyPairService)
+ project_services.append(SecurityGroupService)
+ project_services.append(ServerGroupService)
if not IS_NEUTRON:
- tenant_services.append(FloatingIpService)
- tenant_services.append(NovaQuotaService)
+ project_services.append(FloatingIpService)
+ project_services.append(NovaQuotaService)
if IS_HEAT:
- tenant_services.append(StackService)
+ project_services.append(StackService)
if IS_NEUTRON:
- tenant_services.append(NetworkFloatingIpService)
- if test.is_extension_enabled('metering', 'network'):
- tenant_services.append(NetworkMeteringLabelRuleService)
- tenant_services.append(NetworkMeteringLabelService)
- tenant_services.append(NetworkRouterService)
- tenant_services.append(NetworkPortService)
- tenant_services.append(NetworkSubnetService)
- tenant_services.append(NetworkService)
- tenant_services.append(NetworkSecGroupService)
+ project_services.append(NetworkFloatingIpService)
+ if utils.is_extension_enabled('metering', 'network'):
+ project_services.append(NetworkMeteringLabelRuleService)
+ project_services.append(NetworkMeteringLabelService)
+ project_services.append(NetworkRouterService)
+ project_services.append(NetworkPortService)
+ project_services.append(NetworkSubnetService)
+ project_services.append(NetworkService)
+ project_services.append(NetworkSecGroupService)
if IS_CINDER:
- tenant_services.append(SnapshotService)
- tenant_services.append(VolumeService)
- tenant_services.append(VolumeQuotaService)
- return tenant_services
+ project_services.append(SnapshotService)
+ project_services.append(VolumeService)
+ project_services.append(VolumeQuotaService)
+ return project_services
def get_global_cleanup_services():
@@ -982,7 +986,7 @@
if IS_GLANCE:
global_services.append(ImageService)
global_services.append(UserService)
- global_services.append(TenantService)
+ global_services.append(ProjectService)
global_services.append(DomainService)
global_services.append(RoleService)
return global_services
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index b36bf5c..f07f197 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -47,6 +47,12 @@
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
+You can also use the **--load-list** option, which lets you pass a file path to
+tempest run; the file uses a non-regex format similar to the test list produced
+by the **--list-tests** option. You can select the target tests by removing
+unneeded entries from a list file generated with the **--list-tests** option.
+
Test Execution
==============
There are several options to control how the tests are executed. By default
@@ -97,15 +103,20 @@
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
+from oslo_serialization import jsonutils as json
import six
from testrepository.commands import run_argv
+from tempest import clients
+from tempest.cmd import cleanup_service
from tempest.cmd import init
from tempest.cmd import workspace
+from tempest.common import credentials_factory as credentials
from tempest import config
CONF = config.CONF
+SAVED_STATE_JSON = "saved_state.json"
class TempestRun(command.Command):
@@ -174,6 +185,11 @@
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
+ if parsed_args.state:
+ self._init_state()
+
if parsed_args.combine:
temp_stream = tempfile.NamedTemporaryFile()
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
@@ -203,6 +219,26 @@
def get_description(self):
return 'Run tempest'
+ def _init_state(self):
+ print("Initializing saved state.")
+ data = {}
+ self.global_services = cleanup_service.get_global_cleanup_services()
+ self.admin_mgr = clients.Manager(
+ credentials.get_configured_admin_credentials())
+ admin_mgr = self.admin_mgr
+ kwargs = {'data': data,
+ 'is_dry_run': False,
+ 'saved_state_json': data,
+ 'is_preserve': False,
+ 'is_save_state': True}
+ for service in self.global_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
+
+ with open(SAVED_STATE_JSON, 'w+') as f:
+ f.write(json.dumps(data,
+ sort_keys=True, indent=2, separators=(',', ': ')))
+
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
@@ -237,6 +273,12 @@
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
+ list_selector.add_argument('--load-list', '--load_list',
+ help='Path to a non-regex whitelist file, '
+ 'this file contains a separate test '
+ 'on each newline. This command '
+ 'supports files created by the tempest '
+ 'run ``--list-tests`` command')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
@@ -253,6 +295,10 @@
parallel.add_argument('--serial', '-t', dest='parallel',
action='store_false',
help='Run tests serially')
+ parser.add_argument('--save-state', dest='state',
+ action='store_true',
+ help="To save the state of the cloud before "
+ "running tempest.")
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
@@ -284,6 +330,8 @@
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
+ if parsed_args.load_list:
+ options.append("--load-list=%s" % parsed_args.load_list)
return options
def _run(self, regex, options):
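
The --load-list file documented earlier in this change is a plain whitelist: one test ID per line, no regular expressions, typically produced by trimming the output of tempest run --list-tests. A hypothetical example file (the test IDs are illustrative):

    tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password
    tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete

It would then be consumed with ``tempest run --load-list <path-to-file>``.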
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 8ee3055..f9ebe20 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -102,8 +102,8 @@
response_re = re.compile(r'.* Response - Headers: (?P<headers>.*)')
body_re = re.compile(r'.*Body: (?P<body>.*)')
- # Based on mitaka defaults:
- # http://docs.openstack.org/mitaka/config-reference/
+ # Based on newton defaults:
+ # http://docs.openstack.org/newton/config-reference/
# firewalls-default-ports.html
services = {
"8776": "Block Storage",
@@ -122,7 +122,8 @@
"873": "rsync",
"3260": "iSCSI",
"3306": "MySQL",
- "5672": "AMQP"}
+ "5672": "AMQP",
+ "8082": "murano"}
def __init__(self, services=None):
super(UrlParser, self).__init__()
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0972a3c..fdf28d5 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -14,6 +14,51 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Verifies user's current tempest configuration.
+
+This command is used for updating a user's tempest configuration file based on
+API queries, or for replacing the 'all' option in a tempest configuration file
+with a full list of extensions.
+
+General Options
+===============
+
+-u, --update
+------------
+Update the config file with results from API queries. This assumes whatever is
+set in the config file is incorrect.
+
+-o FILE, --output=FILE
+----------------------
+Output file to write an updated config file to. This has to be a separate file
+from the original one. If this option isn't specified together with -u, the
+values which should be changed will be printed to STDOUT.
+
+-r, --replace-ext
+-----------------
+If specified, the 'all' option will be replaced with a full list of extensions.
+
+Environment Variables
+=====================
+
+The command is workspace aware: it uses the tempest config file tempest.conf
+located in the ./etc/ directory.
+The path to the config file and its name can be changed through environment
+variables.
+
+TEMPEST_CONFIG_DIR
+------------------
+Path to a directory where the tempest configuration file is stored. If the
+variable is set, the default path (./etc/) is overridden.
+
+TEMPEST_CONFIG
+--------------
+Name of a tempest configuration file. If the variable is specified, the default
+name (tempest.conf) is overridden.
+
+"""
+
import argparse
import os
import re
@@ -30,6 +75,7 @@
from tempest.common import credentials_factory as credentials
from tempest import config
import tempest.lib.common.http
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -39,8 +85,8 @@
def _get_config_file():
- default_config_dir = os.path.join(os.path.abspath(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
+ config_dir = os.getcwd()
+ default_config_dir = os.path.join(config_dir, "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
@@ -69,7 +115,25 @@
def verify_glance_api_versions(os, update):
# Check glance api versions
- _, versions = os.image_client.get_versions()
+ # Since we want to verify that the configuration is correct, we cannot
+ # rely on a specific version of the API being available.
+ try:
+ _, versions = os.image_v1.ImagesClient().get_versions()
+ except lib_exc.NotFound:
+ # If not found, we use v2. The assumption is that either v1 or v2
+ # are available, since glance is marked as available in the catalog.
+ # If not, glance should be disabled in Tempest conf.
+ try:
+ versions = os.image_v2.VersionsClient().list_versions()['versions']
+ versions = [x['id'] for x in versions]
+ except lib_exc.NotFound:
+ msg = ('Glance is available in the catalog, but no known version '
+ '(v1.x or v2.x) of Glance could be found, so Glance should '
+ 'be configured as not available')
+ LOG.warn(msg)
+ print_and_or_update('glance', 'service-available', False, update)
+ return
+
if CONF.image_feature_enabled.api_v1 != contains_version('v1.', versions):
print_and_or_update('api_v1', 'image-feature-enabled',
not CONF.image_feature_enabled.api_v1, update)
@@ -92,10 +156,15 @@
def _get_api_versions(os, service):
+ # Clients are used to obtain the base_url. Each client applies the
+ # appropriate filters to the catalog to extract a base_url which
+ # matches the configured region and endpoint_type.
+ # The base URL is used to obtain the list of versions available.
client_dict = {
- 'nova': os.servers_client,
- 'keystone': os.identity_client,
- 'cinder': os.volumes_client,
+ 'nova': os.compute.ServersClient(),
+ 'keystone': os.identity_v3.IdentityClient(
+ endpoint_type=CONF.identity.v3_endpoint_type),
+ 'cinder': os.volume_v3.VolumesClient(),
}
if service != 'keystone' and service != 'cinder':
# Since keystone and cinder may be listening on a path,
@@ -127,10 +196,6 @@
def verify_keystone_api_versions(os, update):
# Check keystone api versions
versions = _get_api_versions(os, 'keystone')
- if (CONF.identity_feature_enabled.api_v2 !=
- contains_version('v2.', versions)):
- print_and_or_update('api_v2', 'identity-feature-enabled',
- not CONF.identity_feature_enabled.api_v2, update)
if (CONF.identity_feature_enabled.api_v3 !=
contains_version('v3.', versions)):
print_and_or_update('api_v3', 'identity-feature-enabled',
@@ -167,13 +232,13 @@
def get_extension_client(os, service):
extensions_client = {
- 'nova': os.extensions_client,
- 'neutron': os.network_extensions_client,
- 'swift': os.capabilities_client,
+ 'nova': os.compute.ExtensionsClient(),
+ 'neutron': os.network.ExtensionsClient(),
+ 'swift': os.object_storage.CapabilitiesClient(),
# NOTE: Cinder v3 API is current and v2 and v1 are deprecated.
# V3 extension API is the same as v2, so we reuse the v2 client
# for v3 API also.
- 'cinder': os.volumes_v2_extension_client,
+ 'cinder': os.volume_v2.ExtensionsClient(),
}
if service not in extensions_client:
@@ -200,7 +265,7 @@
if service != 'swift':
resp = extensions_client.list_extensions()
else:
- __, resp = extensions_client.list_capabilities()
+ resp = extensions_client.list_capabilities()
# For Nova, Cinder and Neutron we use the alias name rather than the
# 'name' field because the alias is considered to be the canonical
# name.
@@ -344,8 +409,8 @@
help="Output file to write an updated config file to. "
"This has to be a separate file from the "
"original config file. If one isn't specified "
- "with -u the new config file will be printed to "
- "STDOUT")
+ "with -u the values which should be changed "
+ "will be printed to STDOUT")
parser.add_argument('-r', '--replace-ext', action='store_true',
help="If specified the all option will be replaced "
"with a full list of extensions")
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 9f467fe..86fe3f5 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -25,9 +25,9 @@
from oslo_log import log as logging
from oslo_utils import excutils
-from tempest.common import fixed_network
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import fixed_network
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
@@ -41,6 +41,27 @@
LOG = logging.getLogger(__name__)
+def is_scheduler_filter_enabled(filter_name):
+ """Check the list of enabled compute scheduler filters from config.
+
+ This function checks whether the given compute scheduler filter is
+ available and configured in the config file. If the
+ scheduler_available_filters option is set to 'all' (the default value,
+ which means the default filters are configured in nova) in tempest.conf,
+ then this function returns True on the assumption that the requested
+ filter 'filter_name' is one of the filters available in nova
+ ("nova.scheduler.filters.all_filters").
+ """
+
+ filters = CONF.compute_feature_enabled.scheduler_available_filters
+ if not filters:
+ return False
+ if 'all' in filters:
+ return True
+ if filter_name in filters:
+ return True
+ return False
+
+
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
volume_backed=False, name=None, flavor=None,
@@ -107,6 +128,8 @@
"this stage.")
raise ValueError(msg)
+ LOG.debug("Provisioning test server with validation resources %s",
+ validation_resources)
if 'security_groups' in kwargs:
kwargs['security_groups'].append(
{'name': validation_resources['security_group']['name']})
@@ -177,9 +200,27 @@
body = rest_client.ResponseBody(body.response, body['server'])
servers = [body]
- # The name of the method to associate a floating IP to as server is too
- # long for PEP8 compliance so:
- assoc = clients.compute_floating_ips_client.associate_floating_ip_to_server
+ def _setup_validation_fip():
+ if CONF.service_available.neutron:
+ ifaces = clients.interfaces_client.list_interfaces(server['id'])
+ validation_port = None
+ for iface in ifaces['interfaceAttachments']:
+ if iface['net_id'] == tenant_network['id']:
+ validation_port = iface['port_id']
+ break
+ if not validation_port:
+ # NOTE(artom) This will get caught by the catch-all clause in
+ # the wait_until loop below
+ raise ValueError('Unable to setup floating IP for validation: '
+ 'port not found on tenant network')
+ clients.floating_ips_client.update_floatingip(
+ validation_resources['floating_ip']['id'],
+ port_id=validation_port)
+ else:
+ fip_client = clients.compute_floating_ips_client
+ fip_client.associate_floating_ip_to_server(
+ floating_ip=validation_resources['floating_ip']['ip'],
+ server_id=servers[0]['id'])
if wait_until:
for server in servers:
@@ -191,9 +232,7 @@
# creation will fail with the condition above (l.58).
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
- assoc(floating_ip=validation_resources[
- 'floating_ip']['ip'],
- server_id=servers[0]['id'])
+ _setup_validation_fip()
except Exception:
with excutils.save_and_reraise_exception():
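
The is_scheduler_filter_enabled helper added above is intended for configuration-driven skips. A hypothetical usage sketch inside a test method (the filter name and the skip site are illustrative, not part of this patch):

    from tempest.common import compute

    if not compute.is_scheduler_filter_enabled('SameHostFilter'):
        raise self.skipException('SameHostFilter is not enabled.')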
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index e6b46ed..da34975 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -14,10 +14,10 @@
from oslo_concurrency import lockutils
from tempest import clients
-from tempest.common import dynamic_creds
-from tempest.common import preprov_creds
from tempest import config
from tempest.lib import auth
+from tempest.lib.common import dynamic_creds
+from tempest.lib.common import preprov_creds
from tempest.lib import exceptions
CONF = config.CONF
@@ -39,19 +39,69 @@
# Subset of the parameters of credential providers that depend on configuration
-def get_common_provider_params():
+def _get_common_provider_params(identity_version):
+ if identity_version == 'v3':
+ identity_uri = CONF.identity.uri_v3
+ elif identity_version == 'v2':
+ identity_uri = CONF.identity.uri
+ else:
+ raise exceptions.InvalidIdentityVersion(
+ identity_version=identity_version)
return {
+ 'identity_version': identity_version,
+ 'identity_uri': identity_uri,
'credentials_domain': CONF.auth.default_credentials_domain_name,
'admin_role': CONF.identity.admin_role
}
-def get_dynamic_provider_params():
- return get_common_provider_params()
+def get_dynamic_provider_params(identity_version, admin_creds=None):
+ """Dynamic provider parameters setup from config
+
+ This helper returns a dict of parameters that can be used to initialise
+ a `DynamicCredentialProvider` according to tempest configuration.
+ Parameters that are not configuration specific (name, network_resources)
+ are not returned.
+
+ :param identity_version: 'v2' or 'v3'
+ :param admin_creds: An object of type `auth.Credentials`. If None, it
+ is built from the configuration file as well.
+ :return: A dict with the parameters
+ """
+ _common_params = _get_common_provider_params(identity_version)
+ admin_creds = admin_creds or get_configured_admin_credentials(
+ fill_in=True, identity_version=identity_version)
+ if identity_version == 'v3':
+ endpoint_type = CONF.identity.v3_endpoint_type
+ elif identity_version == 'v2':
+ endpoint_type = CONF.identity.v2_admin_endpoint_type
+ return dict(_common_params, **dict([
+ ('admin_creds', admin_creds),
+ ('identity_admin_domain_scope', CONF.identity.admin_domain_scope),
+ ('identity_admin_role', CONF.identity.admin_role),
+ ('extra_roles', CONF.auth.tempest_roles),
+ ('neutron_available', CONF.service_available.neutron),
+ ('project_network_cidr', CONF.network.project_network_cidr),
+ ('project_network_mask_bits', CONF.network.project_network_mask_bits),
+ ('public_network_id', CONF.network.public_network_id),
+ ('create_networks', (CONF.auth.create_isolated_networks and not
+ CONF.network.shared_physical_network)),
+ ('resource_prefix', CONF.resources_prefix),
+ ('identity_admin_endpoint_type', endpoint_type)
+ ]))
-def get_preprov_provider_params():
- _common_params = get_common_provider_params()
+def get_preprov_provider_params(identity_version):
+ """Pre-provisioned provider parameters setup from config
+
+ This helper returns a dict of parameters that can be used to initialise
+ a `PreProvisionedCredentialProvider` according to tempest configuration.
+ Parameters that are not configuration specific (name) are not returned.
+
+ :param identity_version: 'v2' or 'v3'
+ :return: A dict with the parameters
+ """
+ _common_params = _get_common_provider_params(identity_version)
reseller_admin_role = CONF.object_storage.reseller_admin_role
return dict(_common_params, **dict([
('accounts_lock_dir', lockutils.get_lock_path(CONF)),
@@ -61,53 +111,55 @@
]))
-# Return the right implementation of CredentialProvider based on config
-# Dropping interface and password, as they are never used anyways
-# TODO(andreaf) Drop them from the CredentialsProvider interface completely
def get_credentials_provider(name, network_resources=None,
force_tenant_isolation=False,
identity_version=None):
+ """Return the right implementation of CredentialProvider based on config
+
+ This helper returns the right implementation of CredentialProvider based on
+ config and on the value of force_tenant_isolation.
+
+ :param name: When provided, it makes it possible to associate credential
+ artifacts back to the owner (test class).
+ :param network_resources: Dictionary of network resources to be allocated
+ for each test account. Only valid for the dynamic
+ credentials provider.
+ :param force_tenant_isolation: Always return a `DynamicCredentialProvider`,
+ regardless of the configuration.
+ :param identity_version: Use the specified identity API version, regardless
+ of the configuration. Valid values are 'v2', 'v3'.
+ """
# If a test requires a new account to work, it can have it via forcing
# dynamic credentials. A new account will be produced only for that test.
# In case admin credentials are not available for the account creation,
# the test should be skipped else it would fail.
identity_version = identity_version or CONF.identity.auth_version
if CONF.auth.use_dynamic_credentials or force_tenant_isolation:
- admin_creds = get_configured_admin_credentials(
- fill_in=True, identity_version=identity_version)
return dynamic_creds.DynamicCredentialProvider(
name=name,
network_resources=network_resources,
- identity_version=identity_version,
- admin_creds=admin_creds,
- identity_admin_domain_scope=CONF.identity.admin_domain_scope,
- identity_admin_role=CONF.identity.admin_role,
- extra_roles=CONF.auth.tempest_roles,
- neutron_available=CONF.service_available.neutron,
- project_network_cidr=CONF.network.project_network_cidr,
- project_network_mask_bits=CONF.network.project_network_mask_bits,
- public_network_id=CONF.network.public_network_id,
- create_networks=(CONF.auth.create_isolated_networks and not
- CONF.network.shared_physical_network),
- resource_prefix=CONF.resources_prefix,
- **get_dynamic_provider_params())
+ **get_dynamic_provider_params(identity_version))
else:
if CONF.auth.test_accounts_file:
# Most params are not relevant for pre-created accounts
return preprov_creds.PreProvisionedCredentialProvider(
- name=name, identity_version=identity_version,
- **get_preprov_provider_params())
+ name=name,
+ **get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
-# We want a helper function here to check and see if admin credentials
-# are available so we can do a single call from skip_checks if admin
-# creds area available.
-# This depends on identity_version as there may be admin credentials
-# available for v2 but not for v3.
def is_admin_available(identity_version):
+ """Helper to check for admin credentials
+
+ Helper function to check if a set of admin credentials is available so we
+ can do a single call from skip_checks.
+ This helper depends on identity_version as there may be admin credentials
+ available for v2 but not for v3.
+
+ :param identity_version: 'v2' or 'v3'
+ """
is_admin = True
# If dynamic credentials is enabled admin will be available
if CONF.auth.use_dynamic_credentials:
@@ -115,8 +167,8 @@
# Check whether test accounts file has the admin specified or not
elif CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
- identity_version=identity_version, name='check_admin',
- **get_preprov_provider_params())
+ name='check_admin',
+ **get_preprov_provider_params(identity_version))
if not check_accounts.admin_available():
is_admin = False
else:
@@ -128,20 +180,24 @@
return is_admin
-# We want a helper function here to check and see if alt credentials
-# are available so we can do a single call from skip_checks if alt
-# creds area available.
-# This depends on identity_version as there may be alt credentials
-# available for v2 but not for v3.
def is_alt_available(identity_version):
+ """Helper to check for alt credentials
+
+ Helper function to check if a second set of credentials is available (aka
+ alt credentials) so we can do a single call from skip_checks.
+ This helper depends on identity_version as there may be alt credentials
+ available for v2 but not for v3.
+
+ :param identity_version: 'v2' or 'v3'
+ """
# If dynamic credentials is enabled alt will be available
if CONF.auth.use_dynamic_credentials:
return True
# Check whether test accounts file has the admin specified or not
if CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
- identity_version=identity_version, name='check_alt',
- **get_preprov_provider_params())
+ name='check_alt',
+ **get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
'A valid credential provider is needed')
@@ -163,17 +219,20 @@
'alt_user': ('identity', 'alt')
}
-DEFAULT_PARAMS = {
- 'disable_ssl_certificate_validation':
- CONF.identity.disable_ssl_certificate_validation,
- 'ca_certs': CONF.identity.ca_certificates_file,
- 'trace_requests': CONF.debug.trace_requests
-}
-
-# Read credentials from configuration, builds a Credentials object
-# based on the specified or configured version
def get_configured_admin_credentials(fill_in=True, identity_version=None):
+ """Get admin credentials from the config file
+
+ Read credentials from configuration and build a Credentials object based on
+ the specified or configured version
+
+ :param fill_in: If True, a request to the Token API is submitted, and the
+ credential object is filled in with all names and IDs from
+ the token API response.
+ :param identity_version: The identity version to talk to and the type of
+ credentials object to be created. 'v2' or 'v3'.
+ :returns: An object of a sub-type of `auth.Credentials`
+ """
identity_version = identity_version or CONF.identity.auth_version
if identity_version not in ('v2', 'v3'):
@@ -186,7 +245,7 @@
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
- params = DEFAULT_PARAMS.copy()
+ params = config.service_client_config()
for attr in conf_attributes:
params[attr] = getattr(CONF.auth, 'admin_' + attr)
# Build and validate credentials. We are reading configured credentials,
@@ -202,10 +261,21 @@
return credentials
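A hedged usage sketch for the helper above; the module path tempest.common.credentials_factory is an assumption for the import:

    from tempest.common import credentials_factory as common_creds

    # Build an admin Credentials object from tempest.conf without calling
    # the Token API ...
    admin_creds = common_creds.get_configured_admin_credentials(fill_in=False)
    # ... or force identity v3 and fill in names/IDs from the token response.
    admin_v3 = common_creds.get_configured_admin_credentials(
        fill_in=True, identity_version='v3')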
-# Wrapper around auth.get_credentials to use the configured identity version
-# if none is specified
def get_credentials(fill_in=True, identity_version=None, **kwargs):
- params = dict(DEFAULT_PARAMS, **kwargs)
+ """Get credentials from dict based on config
+
+ Wrapper around auth.get_credentials to use the configured identity version
+ if none is specified.
+
+ :param fill_in: If True, a request to the Token API is submitted, and the
+ credential object is filled in with all names and IDs from
+ the token API response.
+ :param identity_version: The identity version to talk to and the type of
+ credentials object to be created. 'v2' or 'v3'.
+ :param kwargs: Attributes to be used to build the Credentials object.
+ :returns: An object of a sub-type of `auth.Credentials`
+ """
+ params = dict(config.service_client_config(), **kwargs)
identity_version = identity_version or CONF.identity.auth_version
# In case of "v3" add the domain from config if not specified
# To honour the "default_credentials_domain_name", if not domain
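And a sketch of the wrapper above with explicit credential attributes; the attribute values are illustrative, and for v3 the domain is taken from configuration when not passed in:

    from tempest.common import credentials_factory as common_creds

    creds = common_creds.get_credentials(
        fill_in=False, identity_version='v3',
        username='demo', project_name='demo', password='secret')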
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index 469defe..eaf651b 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -13,8 +13,21 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest import config
+from tempest.lib.common import cred_client
from tempest.lib import exceptions as lib_exc
+CONF = config.CONF
+
+
+def get_project_by_name(client, project_name):
+ projects = client.list_projects({'name': project_name})['projects']
+ for project in projects:
+ if project['name'] == project_name:
+ return project
+ raise lib_exc.NotFound('No such project(%s) in %s' % (project_name,
+ projects))
+
def get_tenant_by_name(client, tenant_name):
tenants = client.list_tenants()['tenants']
@@ -30,3 +43,49 @@
if user['name'] == username:
return user
raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
+
+
+def get_user_by_project(users_client, roles_client, project_id, username):
+ users = users_client.list_users(**{'name': username})['users']
+ users_in_project = roles_client.list_role_assignments(
+ **{'scope.project.id': project_id})['role_assignments']
+ for user in users:
+ if user['name'] == username:
+ for u in users_in_project:
+ if u['user']['id'] == user['id']:
+ return user
+ raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
+
+
+def identity_utils(clients):
+ """A client that abstracts v2 and v3 identity operations.
+
+ This can be used for creating and tearing down projects in tests. It
+ should not be used for testing identity features.
+
+ :param clients: a client manager.
+    :return: a CredsClient object that abstracts the configured identity
+             version for creating and deleting projects and users
+ """
+ if CONF.identity.auth_version == 'v2':
+ client = clients.identity_client
+ users_client = clients.users_client
+ project_client = clients.tenants_client
+ roles_client = clients.roles_client
+ domains_client = None
+ else:
+ client = clients.identity_v3_client
+ users_client = clients.users_v3_client
+ project_client = clients.projects_client
+ roles_client = clients.roles_v3_client
+ domains_client = clients.domains_client
+
+ try:
+ domain = client.auth_provider.credentials.project_domain_name
+ except AttributeError:
+ domain = CONF.auth.default_credentials_domain_name
+
+ return cred_client.get_creds_client(client, project_client,
+ users_client,
+ roles_client,
+ domains_client,
+ project_domain_name=domain)
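A minimal sketch of the intended use of identity_utils for test setup and teardown; os_admin stands for an admin client manager and the project name is illustrative:

    from tempest.common import identity

    creds_client = identity.identity_utils(os_admin)
    project = creds_client.create_project('scratch-project',
                                          description='temporary project')
    # ... exercise the cloud with the throw-away project ...
    creds_client.delete_project(project['id'])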
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 84e31d0..aa81864 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -12,10 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from functools import partial
+import testtools
+
from tempest import config
+from tempest.exceptions import InvalidServiceTag
from tempest.lib.common.utils import data_utils as lib_data_utils
+from tempest.lib import decorators
+
CONF = config.CONF
@@ -36,3 +42,89 @@
return attr_obj
data_utils = DataUtils()
+
+
+def get_service_list():
+ service_list = {
+ 'compute': CONF.service_available.nova,
+ 'image': CONF.service_available.glance,
+ 'volume': CONF.service_available.cinder,
+ # NOTE(masayukig): We have two network services which are neutron and
+ # nova-network. And we have no way to know whether nova-network is
+ # available or not. After the pending removal of nova-network from
+ # nova, we can treat the network/neutron case in the same manner as
+ # the other services.
+ 'network': True,
+ # NOTE(masayukig): Tempest tests always require the identity service.
+ # So we should set this True here.
+ 'identity': True,
+ 'object_storage': CONF.service_available.swift,
+ }
+ return service_list
+
+
+def services(*args):
+ """A decorator used to set an attr for each service used in a test case
+
+ This decorator applies a testtools attr for each service that gets
+ exercised by a test case.
+ """
+ def decorator(f):
+ known_services = get_service_list()
+
+ for service in args:
+ if service not in known_services:
+ raise InvalidServiceTag('%s is not a valid service' % service)
+ decorators.attr(type=list(args))(f)
+
+ @functools.wraps(f)
+ def wrapper(*func_args, **func_kwargs):
+ service_list = get_service_list()
+
+ for service in args:
+ if not service_list[service]:
+ msg = 'Skipped because the %s service is not available' % (
+ service)
+ raise testtools.TestCase.skipException(msg)
+ return f(*func_args, **func_kwargs)
+ return wrapper
+ return decorator
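A short example of how the decorator is applied to a test method; the test class and method are hypothetical:

    from tempest.common import utils
    from tempest import test

    class VolumeAttachTest(test.BaseTestCase):  # hypothetical
        @utils.services('compute', 'volume')
        def test_attach_volume_to_server(self):
            # Tagged with the 'compute' and 'volume' attrs, and skipped
            # automatically when either service is disabled in the config.
            pass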
+
+
+def requires_ext(**kwargs):
+ """A decorator to skip tests if an extension is not enabled
+
+ @param extension
+ @param service
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*func_args, **func_kwargs):
+ if not is_extension_enabled(kwargs['extension'],
+ kwargs['service']):
+ msg = "Skipped because %s extension: %s is not enabled" % (
+ kwargs['service'], kwargs['extension'])
+ raise testtools.TestCase.skipException(msg)
+ return func(*func_args, **func_kwargs)
+ return wrapper
+ return decorator
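Similarly, a hedged example of the extension guard; the extension and service names are illustrative:

    from tempest.common import utils
    from tempest import test

    class RoutersTest(test.BaseTestCase):  # hypothetical
        @utils.requires_ext(extension='router', service='network')
        def test_list_routers(self):
            # Skipped unless 'router' (or 'all') appears in
            # CONF.network_feature_enabled.api_extensions.
            pass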
+
+
+def is_extension_enabled(extension_name, service):
+ """A function that will check the list of enabled extensions from config
+
+ """
+ config_dict = {
+ 'compute': CONF.compute_feature_enabled.api_extensions,
+ 'volume': CONF.volume_feature_enabled.api_extensions,
+ 'network': CONF.network_feature_enabled.api_extensions,
+ 'object': CONF.object_storage_feature_enabled.discoverable_apis,
+ 'identity': CONF.identity_feature_enabled.api_extensions
+ }
+ if not config_dict[service]:
+ return False
+ if config_dict[service][0] == 'all':
+ return True
+ if extension_name in config_dict[service]:
+ return True
+ return False
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 99a628e..52ccfa9 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -70,7 +70,7 @@
if selected:
return "\n".join(selected)
else:
- msg = "'TYPE' column is requred but the output doesn't have it: "
+ msg = "'TYPE' column is required but the output doesn't have it: "
raise tempest.lib.exceptions.TempestException(msg + output)
def get_boot_time(self):
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
deleted file mode 100644
index 9e83a07..0000000
--- a/tempest/common/validation_resources.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_log import log as logging
-
-from tempest import config
-
-from tempest.lib.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def _create_neutron_sec_group_rules(os, sec_group):
- sec_group_rules_client = os.security_group_rules_client
- ethertype = 'IPv4'
- if CONF.validation.ip_version_for_ssh == 6:
- ethertype = 'IPv6'
-
- sec_group_rules_client.create_security_group_rule(
- security_group_id=sec_group['id'],
- protocol='tcp',
- ethertype=ethertype,
- port_range_min=22,
- port_range_max=22,
- direction='ingress')
- sec_group_rules_client.create_security_group_rule(
- security_group_id=sec_group['id'],
- protocol='icmp',
- ethertype=ethertype,
- direction='ingress')
-
-
-def create_ssh_security_group(os, add_rule=False):
- security_groups_client = os.compute_security_groups_client
- security_group_rules_client = os.compute_security_group_rules_client
- sg_name = data_utils.rand_name('securitygroup-')
- sg_description = data_utils.rand_name('description-')
- security_group = security_groups_client.create_security_group(
- name=sg_name, description=sg_description)['security_group']
- if add_rule:
- if CONF.service_available.neutron:
- _create_neutron_sec_group_rules(os, security_group)
- else:
- security_group_rules_client.create_security_group_rule(
- parent_group_id=security_group['id'], ip_protocol='tcp',
- from_port=22, to_port=22)
- security_group_rules_client.create_security_group_rule(
- parent_group_id=security_group['id'], ip_protocol='icmp',
- from_port=-1, to_port=-1)
- LOG.debug("SSH Validation resource security group with tcp and icmp "
- "rules %s created", sg_name)
- return security_group
-
-
-def create_validation_resources(os, validation_resources=None):
- # Create and Return the validation resources required to validate a VM
- validation_data = {}
- if validation_resources:
- if validation_resources['keypair']:
- keypair_name = data_utils.rand_name('keypair')
- validation_data.update(os.keypairs_client.create_keypair(
- name=keypair_name))
- LOG.debug("Validation resource key %s created", keypair_name)
- add_rule = False
- if validation_resources['security_group']:
- if validation_resources['security_group_rules']:
- add_rule = True
- validation_data['security_group'] = \
- create_ssh_security_group(os, add_rule)
- if validation_resources['floating_ip']:
- if CONF.service_available.neutron:
- floatingip = os.floating_ips_client.create_floatingip(
- floating_network_id=CONF.network.public_network_id)
- # validation_resources['floating_ip'] has historically looked
- # like a compute API POST /os-floating-ips response, so we need
- # to mangle it a bit for a Neutron response with different
- # fields.
- validation_data['floating_ip'] = floatingip['floatingip']
- validation_data['floating_ip']['ip'] = (
- floatingip['floatingip']['floating_ip_address'])
- else:
- # NOTE(mriedem): The os-floating-ips compute API was deprecated
- # in the 2.36 microversion. Any tests for CRUD operations on
- # floating IPs using the compute API should be capped at 2.35.
- validation_data.update(
- os.compute_floating_ips_client.create_floating_ip(
- pool=CONF.network.floating_network_name))
- return validation_data
-
-
-def clear_validation_resources(os, validation_data=None):
- # Cleanup the vm validation resources
- has_exception = None
- if validation_data:
- if 'keypair' in validation_data:
- keypair_client = os.keypairs_client
- keypair_name = validation_data['keypair']['name']
- try:
- keypair_client.delete_keypair(keypair_name)
- except lib_exc.NotFound:
- LOG.warning(
- "Keypair %s is not found when attempting to delete",
- keypair_name
- )
- except Exception as exc:
- LOG.exception('Exception raised while deleting key %s',
- keypair_name)
- if not has_exception:
- has_exception = exc
- if 'security_group' in validation_data:
- security_group_client = os.compute_security_groups_client
- sec_id = validation_data['security_group']['id']
- try:
- security_group_client.delete_security_group(sec_id)
- security_group_client.wait_for_resource_deletion(sec_id)
- except lib_exc.NotFound:
- LOG.warning("Security group %s is not found when attempting "
- "to delete", sec_id)
- except lib_exc.Conflict as exc:
- LOG.exception('Conflict while deleting security '
- 'group %s VM might not be deleted', sec_id)
- if not has_exception:
- has_exception = exc
- except Exception as exc:
- LOG.exception('Exception raised while deleting security '
- 'group %s', sec_id)
- if not has_exception:
- has_exception = exc
- if 'floating_ip' in validation_data:
- floating_client = os.compute_floating_ips_client
- fip_id = validation_data['floating_ip']['id']
- try:
- floating_client.delete_floating_ip(fip_id)
- except lib_exc.NotFound:
- LOG.warning('Floating ip %s not found while attempting to '
- 'delete', fip_id)
- except Exception as exc:
- LOG.exception('Exception raised while deleting ip %s', fip_id)
- if not has_exception:
- has_exception = exc
- if has_exception:
- raise has_exception
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 93e6fbf..10afee0 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,25 +179,27 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, status):
- """Waits for a volume resource to reach a given status.
+def wait_for_volume_resource_status(client, resource_id, statuses):
+ """Waits for a volume resource to reach any of the specified statuses.
This function is a common function for volume, snapshot and backup
resources. The function extracts the name of the desired resource from
the client class name of the resource.
"""
+ if not isinstance(statuses, list):
+ statuses = [statuses]
resource_name = re.findall(
- r'(Volume|Snapshot|Backup|Group)',
- client.__class__.__name__)[0].lower()
+ r'(volume|group-snapshot|snapshot|backup|group)',
+ client.resource_type)[-1].replace('-', '_')
show_resource = getattr(client, 'show_' + resource_name)
resource_status = show_resource(resource_id)[resource_name]['status']
start = int(time.time())
- while resource_status != status:
+ while resource_status not in statuses:
time.sleep(client.build_interval)
resource_status = show_resource(resource_id)[
'{}'.format(resource_name)]['status']
- if resource_status == 'error' and resource_status != status:
+ if resource_status == 'error' and resource_status not in statuses:
raise exceptions.VolumeResourceBuildErrorException(
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -206,9 +208,11 @@
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
- (resource_name, resource_id, status, resource_status,
+ (resource_name, resource_id, statuses, resource_status,
client.build_timeout))
raise lib_exc.TimeoutException(message)
+ LOG.info('%s %s reached %s after waiting for %f seconds',
+ resource_name, resource_id, statuses, time.time() - start)
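A usage sketch for the extended waiter; the clients and resource IDs are illustrative:

    from tempest.common import waiters

    # Accept either of two terminal states for a backup ...
    waiters.wait_for_volume_resource_status(
        backups_client, backup['id'], ['available', 'error'])
    # ... while a single status string keeps working as before.
    waiters.wait_for_volume_resource_status(
        volumes_client, volume['id'], 'available')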
def wait_for_volume_retype(client, volume_id, new_volume_type):
diff --git a/tempest/config.py b/tempest/config.py
index af9eefc..b14d4fd 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -15,15 +15,12 @@
from __future__ import print_function
-import functools
import os
import tempfile
-import debtcollector.removals
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
-import testtools
from tempest.lib import exceptions
from tempest.lib.services import clients
@@ -197,6 +194,8 @@
default=60,
help='Timeout in seconds to wait for the http request to '
'return'),
+ cfg.StrOpt('proxy_url',
+ help='Specify an http proxy to use.')
]
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
@@ -208,8 +207,14 @@
help='Does the identity service have delegation and '
'impersonation enabled'),
cfg.BoolOpt('api_v2',
- default=True,
- help='Is the v2 identity API enabled'),
+ default=False,
+ help='Is the v2 identity API enabled',
+ deprecated_for_removal=True,
+ deprecated_reason='The identity v2.0 API was removed in the '
+ 'Queens release. Tests that exercise the '
+ 'v2.0 API will be removed from tempest in '
+ 'the v22.0.0 release. They are kept only to '
+ 'test stable branches.'),
cfg.BoolOpt('api_v2_admin',
default=True,
help="Is the v2 identity admin API available? This setting "
@@ -222,7 +227,8 @@
help="A list of enabled identity extensions with a special "
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
- "To get the list of extensions run: 'keystone discover'"),
+ "To get the list of extensions run: "
+ "'openstack extension list --identity'"),
# TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed
# in Newton and Ocata. This option can be removed after Mitaka is end of
# life.
@@ -234,6 +240,12 @@
deprecated_reason="This feature flag was introduced to "
"support testing of old OpenStack versions, "
"which are not supported anymore"),
+ cfg.BoolOpt('domain_specific_drivers',
+ default=False,
+ help='Are domain specific drivers enabled? '
+ 'This configuration value should be same as '
+ '[identity]->domain_specific_drivers_enabled '
+ 'in keystone.conf.'),
cfg.BoolOpt('security_compliance',
default=False,
help='Does the environment have the security compliance '
@@ -833,7 +845,14 @@
help="Is the v2 volume API enabled"),
cfg.BoolOpt('api_v3',
default=True,
- help="Is the v3 volume API enabled")
+ help="Is the v3 volume API enabled"),
+ cfg.BoolOpt('extend_attached_volume',
+ default=False,
+ help='Does the cloud support extending the size of a volume '
+ 'which is currently attached to a server instance? This '
+ 'depends on the 3.42 volume API microversion and the '
+ '2.51 compute API microversion. Also, not all volume or '
+ 'compute backends support this operation.')
]
@@ -1278,79 +1297,6 @@
CONF = TempestConfigProxy()
-@debtcollector.removals.remove(
- message='use testtools.skipUnless instead', removal_version='Queens')
-def skip_unless_config(*args):
- """Decorator to raise a skip if a config opt doesn't exist or is False
-
- :param str group: The first arg, the option group to check
- :param str name: The second arg, the option name to check
- :param str msg: Optional third arg, the skip msg to use if a skip is raised
- :raises testtools.TestCaseskipException: If the specified config option
- doesn't exist or it exists and evaluates to False
- """
- def decorator(f):
- group = args[0]
- name = args[1]
-
- @functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
- if not hasattr(CONF, group):
- msg = "Config group %s doesn't exist" % group
- raise testtools.TestCase.skipException(msg)
-
- conf_group = getattr(CONF, group)
- if not hasattr(conf_group, name):
- msg = "Config option %s.%s doesn't exist" % (group,
- name)
- raise testtools.TestCase.skipException(msg)
-
- value = getattr(conf_group, name)
- if not value:
- if len(args) == 3:
- msg = args[2]
- else:
- msg = "Config option %s.%s is false" % (group,
- name)
- raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
- return wrapper
- return decorator
-
-
-@debtcollector.removals.remove(
- message='use testtools.skipIf instead', removal_version='Queens')
-def skip_if_config(*args):
- """Raise a skipException if a config exists and is True
-
- :param str group: The first arg, the option group to check
- :param str name: The second arg, the option name to check
- :param str msg: Optional third arg, the skip msg to use if a skip is raised
- :raises testtools.TestCase.skipException: If the specified config option
- exists and evaluates to True
- """
- def decorator(f):
- group = args[0]
- name = args[1]
-
- @functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
- if hasattr(CONF, group):
- conf_group = getattr(CONF, group)
- if hasattr(conf_group, name):
- value = getattr(conf_group, name)
- if value:
- if len(args) == 3:
- msg = args[2]
- else:
- msg = "Config option %s.%s is false" % (group,
- name)
- raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
- return wrapper
- return decorator
-
-
def service_client_config(service_client_name=None):
"""Return a dict with the parameters to init service clients
@@ -1371,6 +1317,7 @@
* `ca_certs`
* `trace_requests`
* `http_timeout`
+ * `proxy_url`
The dict returned by this does not fit a few service clients:
@@ -1393,7 +1340,8 @@
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests,
- 'http_timeout': CONF.service_clients.http_timeout
+ 'http_timeout': CONF.service_clients.http_timeout,
+ 'proxy_url': CONF.service_clients.proxy_url,
}
if service_client_name is None:
@@ -1447,7 +1395,7 @@
module = service_clients[service_client]
configs = service_client.split('.')[0]
service_client_data = dict(
- name=service_client.replace('.', '_'),
+ name=service_client.replace('.', '_').replace('-', '_'),
service_version=service_client,
module_path=module.__name__,
client_names=module.__all__,
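For context, a sketch of what the updated helper now returns; the keys listed are the ones documented above:

    from tempest import config

    params = config.service_client_config()
    # params includes, among others:
    #   disable_ssl_certificate_validation, ca_certs, trace_requests,
    #   http_timeout and the new proxy_url taken from
    #   CONF.service_clients.proxy_url.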
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index a437761..a430d5d 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -52,12 +52,5 @@
"the configured network")
-# NOTE(andreaf) This exception is added here to facilitate the migration
-# of get_network_from_name and preprov_creds to tempest.lib, and it should
-# be migrated along with them
-class InvalidTestResource(exceptions.TempestException):
- message = "%(name)s is not a valid %(type)s, or the name is ambiguous"
-
-
-class RFCViolation(exceptions.RestClientException):
- message = "RFC Violation"
+class InvalidServiceTag(exceptions.TempestException):
+ message = "Invalid service tag"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 067da09..aae685c 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -33,6 +33,7 @@
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")
+EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))')
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
@@ -294,6 +295,17 @@
yield(0, msg)
+def unsupported_exception_attribute_PY3(logical_line):
+ """Check Unsupported 'message' exception attribute in PY3
+
+ T116
+ """
+ result = EX_ATTRIBUTE.search(logical_line)
+ msg = ("[T116] Unsupported 'message' Exception attribute in PY3")
+ if result:
+ yield(0, msg)
+
+
def factory(register):
register(import_no_clients_in_api_and_scenario_tests)
register(scenario_tests_need_service_tags)
@@ -309,3 +321,4 @@
register(dont_use_config_in_tempest_lib)
register(use_rand_uuid_instead_of_uuid4)
register(dont_put_admin_tests_on_nonadmin_path)
+ register(unsupported_exception_attribute_PY3)
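A small, hedged demonstration of the new T116 check; the logical lines passed in are illustrative (Python 3 removed the message attribute on exceptions, so the first one is flagged):

    from tempest.hacking import checks

    # The check is a generator yielding (offset, message) for offending lines.
    assert list(checks.unsupported_exception_attribute_PY3(
        "        LOG.warning(ex.message)"))
    assert not list(checks.unsupported_exception_attribute_PY3(
        "        LOG.warning(str(ex))"))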
diff --git a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
index a3c9099..28ed816 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
@@ -65,7 +65,7 @@
'items': {
'type': 'object',
'properties': {
- 'version': {'type': 'integer'},
+ 'version': {'enum': [4, 6]},
'addr': {
'type': 'string',
'oneOf': [
diff --git a/tempest/lib/api_schema/response/compute/v2_1/servers.py b/tempest/lib/api_schema/response/compute/v2_1/servers.py
index 7360396..2954de0 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/servers.py
@@ -420,8 +420,13 @@
'properties': {
'event': {'type': 'string'},
'start_time': parameter_types.date_time,
- 'finish_time': parameter_types.date_time,
- 'result': {'type': 'string'},
+ # The finish_time, result and optionally traceback are all
+ # possibly None (null) until the event is actually finished.
+ # The traceback would only be set if there was an error, but
+ # when the event is complete both finish_time and result will
+ # be set.
+ 'finish_time': parameter_types.date_time_or_null,
+ 'result': {'type': ['string', 'null']},
'traceback': {'type': ['string', 'null']}
},
'additionalProperties': False,
diff --git a/tempest/lib/api_schema/response/compute/v2_1/services.py b/tempest/lib/api_schema/response/compute/v2_1/services.py
index 6949f86..3b58ece 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/services.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/services.py
@@ -65,3 +65,25 @@
'required': ['service']
}
}
+
+disable_log_reason = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'disabled_reason': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'status': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['disabled_reason', 'binary', 'host', 'status']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['service']
+ }
+}
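A quick sanity sketch showing the new schema accepting a conforming payload; the sample body and the use of the jsonschema library are illustrative:

    import jsonschema

    from tempest.lib.api_schema.response.compute.v2_1 import services

    body = {'service': {'disabled_reason': 'maintenance',
                        'binary': 'nova-compute',
                        'host': 'compute-1',
                        'status': 'disabled'}}
    jsonschema.validate(body, services.disable_log_reason['response_body'])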
diff --git a/tempest/tests/services/__init__.py b/tempest/lib/api_schema/response/compute/v2_11/__init__.py
similarity index 100%
rename from tempest/tests/services/__init__.py
rename to tempest/lib/api_schema/response/compute/v2_11/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_11/services.py b/tempest/lib/api_schema/response/compute/v2_11/services.py
new file mode 100644
index 0000000..18b833b
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_11/services.py
@@ -0,0 +1,46 @@
+# Copyright 2017 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import services
+
+
+list_services = copy.deepcopy(services.list_services)
+list_services['response_body']['properties']['services']['items'][
+ 'properties']['forced_down'] = {'type': 'boolean'}
+list_services['response_body']['properties']['services']['items'][
+ 'required'].append('forced_down')
+
+update_forced_down = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'binary': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'forced_down': {'type': 'boolean'}
+ },
+ 'additionalProperties': False,
+ 'required': ['binary', 'host', 'forced_down']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['service']
+ }
+}
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index ab4308f..2dd9d00 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -261,12 +261,13 @@
def __init__(self, credentials, auth_url,
disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None, scope='project',
- http_timeout=None):
+ http_timeout=None, proxy_url=None):
super(KeystoneAuthProvider, self).__init__(credentials, scope)
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
self.trace_requests = trace_requests
self.http_timeout = http_timeout
+ self.proxy_url = proxy_url
self.auth_url = auth_url
self.auth_client = self._auth_client(auth_url)
@@ -345,7 +346,7 @@
return json_v2id.TokenClient(
auth_url, disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs, trace_requests=self.trace_requests,
- http_timeout=self.http_timeout)
+ http_timeout=self.http_timeout, proxy_url=self.proxy_url)
def _auth_params(self):
"""Auth parameters to be passed to the token request
@@ -433,7 +434,7 @@
return json_v3id.V3TokenClient(
auth_url, disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs, trace_requests=self.trace_requests,
- http_timeout=self.http_timeout)
+ http_timeout=self.http_timeout, proxy_url=self.proxy_url)
def _auth_params(self):
"""Auth parameters to be passed to the token request
@@ -599,7 +600,8 @@
def get_credentials(auth_url, fill_in=True, identity_version='v2',
disable_ssl_certificate_validation=None, ca_certs=None,
- trace_requests=None, http_timeout=None, **kwargs):
+ trace_requests=None, http_timeout=None, proxy_url=None,
+ **kwargs):
"""Builds a credentials object based on the configured auth_version
:param auth_url (string): Full URI of the OpenStack Identity API(Keystone)
@@ -617,6 +619,7 @@
:param trace_requests: trace in log API requests to the auth system
:param http_timeout: timeout in seconds to wait for the http request to
return
+ :param proxy_url: URL of HTTP(s) proxy used when fill_in is True
:param kwargs (dict): Dict of credential key/value pairs
Examples:
@@ -641,7 +644,7 @@
auth_provider = auth_provider_class(
creds, auth_url, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests,
- http_timeout=http_timeout)
+ http_timeout=http_timeout, proxy_url=proxy_url)
creds = auth_provider.fill_credentials()
return creds
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index 5468a7b..f39ecbc 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -93,10 +93,20 @@
:type insecure: boolean
:param prefix: prefix to insert before commands
:type prefix: string
+ :param user_domain_name: User's domain name
+ :type user_domain_name: string
+ :param user_domain_id: User's domain ID
+ :type user_domain_id: string
+ :param project_domain_name: Project's domain name
+ :type project_domain_name: string
+ :param project_domain_id: Project's domain ID
+ :type project_domain_id: string
"""
def __init__(self, username='', password='', tenant_name='', uri='',
- cli_dir='', insecure=False, prefix='', *args, **kwargs):
+ cli_dir='', insecure=False, prefix='', user_domain_name=None,
+ user_domain_id=None, project_domain_name=None,
+ project_domain_id=None, *args, **kwargs):
"""Initialize a new CLIClient object."""
super(CLIClient, self).__init__()
self.cli_dir = cli_dir if cli_dir else '/usr/bin'
@@ -106,6 +116,10 @@
self.uri = uri
self.insecure = insecure
self.prefix = prefix
+ self.user_domain_name = user_domain_name
+ self.user_domain_id = user_domain_id
+ self.project_domain_name = project_domain_name
+ self.project_domain_id = project_domain_id
def nova(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
@@ -366,6 +380,14 @@
self.tenant_name,
self.password,
self.uri))
+ if self.user_domain_name is not None:
+ creds += ' --os-user-domain-name %s' % self.user_domain_name
+ if self.user_domain_id is not None:
+ creds += ' --os-user-domain-id %s' % self.user_domain_id
+ if self.project_domain_name is not None:
+ creds += ' --os-project-domain-name %s' % self.project_domain_name
+ if self.project_domain_id is not None:
+ creds += ' --os-project-domain-id %s' % self.project_domain_id
if self.insecure:
flags = creds + ' --insecure ' + flags
else:
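A hedged construction example for the new keystone v3 domain arguments; all values are illustrative:

    from tempest.lib.cli import base as cli_base

    cli = cli_base.CLIClient(username='admin', password='secret',
                             tenant_name='admin',
                             uri='http://mycloud/identity/v3',
                             user_domain_name='Default',
                             project_domain_name='Default')
    # The credential flags built by the client now include
    # --os-user-domain-name and --os-project-domain-name.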
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index 1371b3c..bcb076b 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -120,3 +120,59 @@
api_microversion,
response_header))
raise exceptions.InvalidHTTPResponseHeader(msg)
+
+
+def compare_version_header_to_response(api_microversion_header_name,
+ api_microversion,
+ response_header,
+ operation='eq'):
+ """Compares API microversion in response header to ``api_microversion``.
+
+ Compare the ``api_microversion`` value in response header if microversion
+ header is present in response, otherwise return false.
+
+ To make this function work for APIs which do not return microversion
+ header in response (example compute v2.0), this function does *not* raise
+ InvalidHTTPResponseHeader.
+
+ :param api_microversion_header_name: Microversion header name. Example:
+ 'Openstack-Api-Version'.
+ :param api_microversion: Microversion number. Example:
+
+ * '2.10' for the old-style header name, 'X-OpenStack-Nova-API-Version'
+ * 'Compute 2.10' for the new-style header name, 'Openstack-Api-Version'
+
+ :param response_header: Response header where microversion is
+ expected to be present.
+ :param operation: The boolean operation to use to compare the
+ ``api_microversion`` to the microversion in ``response_header``.
+ Can be 'lt', 'eq', 'gt', 'le', 'ne', 'ge'. Default is 'eq'. The
+ operation type should be based on the order of the arguments:
+ ``api_microversion`` <operation> ``response_header`` microversion.
+ :returns: True if the comparison is logically true, else False if the
+ comparison is logically false or if ``api_microversion_header_name`` is
+ missing in the ``response_header``.
+ :raises InvalidParam: If the operation is not lt, eq, gt, le, ne or ge.
+ """
+ api_microversion_header_name = api_microversion_header_name.lower()
+ if api_microversion_header_name not in response_header:
+ return False
+
+ op = getattr(api_version_request.APIVersionRequest,
+ '__%s__' % operation, None)
+
+ if op is None:
+ msg = ("Operation %s is invalid. Valid options include: lt, eq, gt, "
+ "le, ne, ge." % operation)
+ raise exceptions.InvalidParam(invalid_param=msg)
+
+ # Remove "volume" from "volume <microversion>", for example, so that the
+ # microversion can be converted to `APIVersionRequest`.
+ api_version = api_microversion.split(' ')[-1]
+ resp_version = response_header[api_microversion_header_name].split(' ')[-1]
+ if not op(
+ api_version_request.APIVersionRequest(api_version),
+ api_version_request.APIVersionRequest(resp_version)):
+ return False
+
+ return True
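A small usage sketch; the header value and the requested microversion are illustrative:

    from tempest.lib.common import api_version_utils

    resp_header = {'openstack-api-version': 'volume 3.42'}
    # True: the requested microversion is <= the one the server replied with.
    api_version_utils.compare_version_header_to_response(
        'Openstack-Api-Version', 'volume 3.40', resp_header, 'le')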
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 1b450ab..42ed41b 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -22,8 +22,9 @@
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
- def __init__(self, identity_version, name=None, network_resources=None,
- credentials_domain=None, admin_role=None):
+ def __init__(self, identity_version, name=None,
+ network_resources=None, credentials_domain=None,
+ admin_role=None, identity_uri=None):
"""A CredentialProvider supplies credentials to test classes.
:param identity_version: Identity version of the credentials provided
@@ -33,8 +34,11 @@
credentials
:param credentials_domain: Domain credentials belong to
:param admin_role: Name of the role of the admin account
+ :param identity_uri: Identity URI of the target cloud. This *must* be
+ specified for anything to work.
"""
self.identity_version = identity_version
+ self.identity_uri = identity_uri
self.name = name or "test_creds"
self.network_resources = network_resources
self.credentials_domain = credentials_domain or 'Default'
diff --git a/tempest/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
similarity index 79%
rename from tempest/common/dynamic_creds.py
rename to tempest/lib/common/dynamic_creds.py
index 88fe26c..4f1a883 100644
--- a/tempest/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -12,20 +12,59 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ipaddress
+
import netaddr
from oslo_log import log as logging
import six
-from tempest import clients
from tempest.lib.common import cred_client
from tempest.lib.common import cred_provider
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services import clients
LOG = logging.getLogger(__name__)
class DynamicCredentialProvider(cred_provider.CredentialProvider):
+ """Creates credentials dynamically for tests
+
+ A credential provider that, based on an initial set of
+ admin credentials, creates new credentials on the fly for
+ tests to use and then discard.
+
+ :param str identity_version: identity API version to use `v2` or `v3`
+ :param str admin_role: name of the admin role added to admin users
+ :param str name: names of dynamic resources include this parameter
+ when specified
+ :param str credentials_domain: name of the domain where the users
+ are created. If not defined, the project
+ domain from admin_credentials is used
+ :param dict network_resources: network resources to be created for
+ the created credentials
+ :param Credentials admin_creds: initial admin credentials
+ :param bool identity_admin_domain_scope: Set to true if admin should be
+ scoped to the domain. By
+ default this is False and the
+ admin role is scoped to the
+ project.
+ :param str identity_admin_role: The role name to use for admin
+ :param list extra_roles: A list of strings for extra roles that should
+ be assigned to all created users
+    :param bool neutron_available: Whether we are running in an environment
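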
+ with neutron
+ :param bool create_networks: Whether dynamic project networks should be
+ created or not
+ :param project_network_cidr: The CIDR to use for created project
+ networks
+ :param project_network_mask_bits: The network mask bits to use for
+ created project networks
+ :param public_network_id: The id for the public network to use
+ :param identity_admin_endpoint_type: The endpoint type for identity
+ admin clients. Defaults to public.
+ :param identity_uri: Identity URI of the target cloud
+ """
def __init__(self, identity_version, name=None, network_resources=None,
credentials_domain=None, admin_role=None, admin_creds=None,
@@ -33,44 +72,12 @@
identity_admin_role='admin', extra_roles=None,
neutron_available=False, create_networks=True,
project_network_cidr=None, project_network_mask_bits=None,
- public_network_id=None, resource_prefix=None):
- """Creates credentials dynamically for tests
-
- A credential provider that, based on an initial set of
- admin credentials, creates new credentials on the fly for
- tests to use and then discard.
-
- :param str identity_version: identity API version to use `v2` or `v3`
- :param str admin_role: name of the admin role added to admin users
- :param str name: names of dynamic resources include this parameter
- when specified
- :param str credentials_domain: name of the domain where the users
- are created. If not defined, the project
- domain from admin_credentials is used
- :param dict network_resources: network resources to be created for
- the created credentials
- :param Credentials admin_creds: initial admin credentials
- :param bool identity_admin_domain_scope: Set to true if admin should be
- scoped to the domain. By
- default this is False and the
- admin role is scoped to the
- project.
- :param str identity_admin_role: The role name to use for admin
- :param list extra_roles: A list of strings for extra roles that should
- be assigned to all created users
- :param bool neutron_available: Whether we are running in an environemnt
- with neutron
- :param bool create_networks: Whether dynamic project networks should be
- created or not
- :param project_network_cidr: The CIDR to use for created project
- networks
- :param project_network_mask_bits: The network mask bits to use for
- created project networks
- :param public_network_id: The id for the public network to use
- """
+ public_network_id=None, resource_prefix=None,
+ identity_admin_endpoint_type='public', identity_uri=None):
super(DynamicCredentialProvider, self).__init__(
- identity_version=identity_version, admin_role=admin_role,
- name=name, credentials_domain=credentials_domain,
+ identity_version=identity_version, identity_uri=identity_uri,
+ admin_role=admin_role, name=name,
+ credentials_domain=credentials_domain,
network_resources=network_resources)
self.network_resources = network_resources
self._creds = {}
@@ -84,6 +91,7 @@
self.default_admin_creds = admin_creds
self.identity_admin_domain_scope = identity_admin_domain_scope
self.identity_admin_role = identity_admin_role or 'admin'
+ self.identity_admin_endpoint_type = identity_admin_endpoint_type
self.extra_roles = extra_roles or []
(self.identity_admin_client,
self.tenants_admin_client,
@@ -94,7 +102,8 @@
self.routers_admin_client,
self.subnets_admin_client,
self.ports_admin_client,
- self.security_groups_admin_client) = self._get_admin_clients()
+ self.security_groups_admin_client) = self._get_admin_clients(
+ identity_admin_endpoint_type)
# Domain where isolated credentials are provisioned (v3 only).
# Use that of the admin account is None is configured.
self.creds_domain_name = None
@@ -110,32 +119,43 @@
self.domains_admin_client,
self.creds_domain_name)
- def _get_admin_clients(self):
+ def _get_admin_clients(self, endpoint_type):
"""Returns a tuple with instances of the following admin clients
(in this order):
identity
network
"""
- os = clients.Manager(self.default_admin_creds)
+ os = clients.ServiceClients(self.default_admin_creds,
+ self.identity_uri)
+ params = {'endpoint_type': endpoint_type}
if self.identity_version == 'v2':
- return (os.identity_client, os.tenants_client, os.users_client,
- os.roles_client, None,
- os.networks_client, os.routers_client, os.subnets_client,
- os.ports_client, os.security_groups_client)
+ return (os.identity_v2.IdentityClient(**params),
+ os.identity_v2.TenantsClient(**params),
+ os.identity_v2.UsersClient(**params),
+ os.identity_v2.RolesClient(**params), None,
+ os.network.NetworksClient(),
+ os.network.RoutersClient(),
+ os.network.SubnetsClient(),
+ os.network.PortsClient(),
+ os.network.SecurityGroupsClient())
else:
# We use a dedicated client manager for identity client in case we
# need a different token scope for them.
scope = 'domain' if self.identity_admin_domain_scope else 'project'
- identity_os = clients.Manager(self.default_admin_creds,
- scope=scope)
- return (identity_os.identity_v3_client,
- identity_os.projects_client,
- identity_os.users_v3_client, identity_os.roles_v3_client,
- identity_os.domains_client,
- os.networks_client, os.routers_client,
- os.subnets_client, os.ports_client,
- os.security_groups_client)
+ identity_os = clients.ServiceClients(self.default_admin_creds,
+ self.identity_uri,
+ scope=scope)
+ return (identity_os.identity_v3.IdentityClient(**params),
+ identity_os.identity_v3.ProjectsClient(**params),
+ identity_os.identity_v3.UsersClient(**params),
+ identity_os.identity_v3.RolesClient(**params),
+ identity_os.identity_v3.DomainsClient(**params),
+ os.network.NetworksClient(),
+ os.network.RoutersClient(),
+ os.network.SubnetsClient(),
+ os.network.PortsClient(),
+ os.network.SecurityGroupsClient())
def _create_creds(self, admin=False, roles=None):
"""Create credentials with random name.
@@ -275,14 +295,16 @@
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
- ip_version=4)
+ ip_version=(ipaddress.ip_network(
+ six.text_type(subnet_cidr)).version))
else:
resp_body = self.subnets_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
- ip_version=4)
+ ip_version=(ipaddress.ip_network(
+ six.text_type(subnet_cidr)).version))
break
except lib_exc.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
@@ -429,7 +451,7 @@
creds.username)
# NOTE(zhufl): Only when neutron's security_group ext is
# enabled, _cleanup_default_secgroup will not raise error. But
- # here cannot use test.is_extension_enabled for it will cause
+                # here we cannot use test_utils.is_extension_enabled as it will cause
# "circular dependency". So here just use try...except to
# ensure tenant deletion without big changes.
try:
diff --git a/tempest/common/fixed_network.py b/tempest/lib/common/fixed_network.py
similarity index 99%
rename from tempest/common/fixed_network.py
rename to tempest/lib/common/fixed_network.py
index 4032c90..e2054a4 100644
--- a/tempest/common/fixed_network.py
+++ b/tempest/lib/common/fixed_network.py
@@ -14,8 +14,8 @@
from oslo_log import log as logging
-from tempest import exceptions
from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
LOG = logging.getLogger(__name__)
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index 8a47d44..738c37f 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -17,6 +17,47 @@
import urllib3
+class ClosingProxyHttp(urllib3.ProxyManager):
+ def __init__(self, proxy_url, disable_ssl_certificate_validation=False,
+ ca_certs=None, timeout=None):
+ kwargs = {}
+
+ if disable_ssl_certificate_validation:
+ urllib3.disable_warnings()
+ kwargs['cert_reqs'] = 'CERT_NONE'
+ elif ca_certs:
+ kwargs['cert_reqs'] = 'CERT_REQUIRED'
+ kwargs['ca_certs'] = ca_certs
+
+ if timeout:
+ kwargs['timeout'] = timeout
+
+ super(ClosingProxyHttp, self).__init__(proxy_url, **kwargs)
+
+ def request(self, url, method, *args, **kwargs):
+
+ class Response(dict):
+ def __init__(self, info):
+ for key, value in info.getheaders().items():
+ self[key.lower()] = value
+ self.status = info.status
+ self['status'] = str(self.status)
+ self.reason = info.reason
+ self.version = info.version
+ self['content-location'] = url
+
+ original_headers = kwargs.get('headers', {})
+ new_headers = dict(original_headers, connection='close')
+ new_kwargs = dict(kwargs, headers=new_headers)
+
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ r = super(ClosingProxyHttp, self).request(method, url, retries=retry,
+ *args, **new_kwargs)
+ return Response(r), r.data
+
+
class ClosingHttp(urllib3.poolmanager.PoolManager):
def __init__(self, disable_ssl_certificate_validation=False,
ca_certs=None, timeout=None):
@@ -25,8 +66,7 @@
if disable_ssl_certificate_validation:
urllib3.disable_warnings()
kwargs['cert_reqs'] = 'CERT_NONE'
-
- if ca_certs:
+ elif ca_certs:
kwargs['cert_reqs'] = 'CERT_REQUIRED'
kwargs['ca_certs'] = ca_certs
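A minimal sketch of the new proxy-aware HTTP object; the proxy and target URLs are illustrative:

    from tempest.lib.common import http

    proxied = http.ClosingProxyHttp('http://proxy.example.com:3128',
                                    timeout=60)
    resp, body = proxied.request('http://mycloud/identity/v3', 'GET')
    # resp is a dict of lower-cased headers that also exposes .status,
    # .reason and .version; body is the raw response data.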
diff --git a/tempest/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
similarity index 88%
rename from tempest/common/preprov_creds.py
rename to tempest/lib/common/preprov_creds.py
index 8053cac..83db513 100644
--- a/tempest/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -20,12 +20,11 @@
import six
import yaml
-from tempest import clients
-from tempest.common import fixed_network
-from tempest import exceptions
from tempest.lib import auth
from tempest.lib.common import cred_provider
+from tempest.lib.common import fixed_network
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services import clients
LOG = logging.getLogger(__name__)
@@ -42,6 +41,35 @@
class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
+ """Credentials provider using pre-provisioned accounts
+
+ This credentials provider loads the details of pre-provisioned
+ accounts from a YAML file, in the format specified by
+ ``etc/accounts.yaml.sample``. It locks accounts while in use, using the
+ external locking mechanism, allowing for multiple python processes
+ to share a single account file, and thus running tests in parallel.
+
+ The accounts_lock_dir must be generated using `lockutils.get_lock_path`
+ from the oslo.concurrency library. For instance::
+
+ accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
+ 'test_accounts')
+
+ Role names for object storage are optional as long as the
+ `operator` and `reseller_admin` credential types are not used in the
+ accounts file.
+
+ :param identity_version: identity version of the credentials
+ :param admin_role: name of the admin role
+ :param test_accounts_file: path to the accounts YAML file
+ :param accounts_lock_dir: the directory for external locking
+ :param name: name of the hash file (optional)
+ :param credentials_domain: name of the domain credentials belong to
+ (if no domain is configured)
+ :param object_storage_operator_role: name of the role
+ :param object_storage_reseller_admin_role: name of the role
+ :param identity_uri: Identity URI of the target cloud
+ """
# Exclude from the hash fields specific to v2 or v3 identity API
# i.e. only include user*, project*, tenant* and password
@@ -51,38 +79,11 @@
def __init__(self, identity_version, test_accounts_file,
accounts_lock_dir, name=None, credentials_domain=None,
admin_role=None, object_storage_operator_role=None,
- object_storage_reseller_admin_role=None):
- """Credentials provider using pre-provisioned accounts
-
- This credentials provider loads the details of pre-provisioned
- accounts from a YAML file, in the format specified by
- `etc/accounts.yaml.sample`. It locks accounts while in use, using the
- external locking mechanism, allowing for multiple python processes
- to share a single account file, and thus running tests in parallel.
-
- The accounts_lock_dir must be generated using `lockutils.get_lock_path`
- from the oslo.concurrency library. For instance:
-
- accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
- 'test_accounts')
-
- Role names for object storage are optional as long as the
- `operator` and `reseller_admin` credential types are not used in the
- accounts file.
-
- :param identity_version: identity version of the credentials
- :param admin_role: name of the admin role
- :param test_accounts_file: path to the accounts YAML file
- :param accounts_lock_dir: the directory for external locking
- :param name: name of the hash file (optional)
- :param credentials_domain: name of the domain credentials belong to
- (if no domain is configured)
- :param object_storage_operator_role: name of the role
- :param object_storage_reseller_admin_role: name of the role
- """
+ object_storage_reseller_admin_role=None, identity_uri=None):
super(PreProvisionedCredentialProvider, self).__init__(
identity_version=identity_version, name=name,
- admin_role=admin_role, credentials_domain=credentials_domain)
+ admin_role=admin_role, credentials_domain=credentials_domain,
+ identity_uri=identity_uri)
self.test_accounts_file = test_accounts_file
if test_accounts_file:
accounts = read_accounts_yaml(self.test_accounts_file)
@@ -341,13 +342,14 @@
auth_url=None, fill_in=False,
identity_version=self.identity_version, **creds_dict)
net_creds = cred_provider.TestResources(credential)
- net_clients = clients.Manager(credentials=credential)
- compute_network_client = net_clients.compute_networks_client
+ net_clients = clients.ServiceClients(credentials=credential,
+ identity_uri=self.identity_uri)
+ compute_network_client = net_clients.compute.NetworksClient()
net_name = self.hash_dict['networks'].get(hash, None)
try:
network = fixed_network.get_network_from_name(
net_name, compute_network_client)
- except exceptions.InvalidTestResource:
+ except lib_exc.InvalidTestResource:
network = {}
net_creds.set_resources(network=network)
return net_creds
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 63cf07f..22276d4 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -69,6 +69,7 @@
of the request and response payload
:param str http_timeout: Timeout in seconds to wait for the http request to
return
+ :param str proxy_url: http proxy url to use.
"""
# The version of the API this client implements
@@ -80,7 +81,8 @@
endpoint_type='publicURL',
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
- trace_requests='', name=None, http_timeout=None):
+ trace_requests='', name=None, http_timeout=None,
+ proxy_url=None):
self.auth_provider = auth_provider
self.service = service
self.region = region
@@ -100,9 +102,16 @@
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = disable_ssl_certificate_validation
- self.http_obj = http.ClosingHttp(
- disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+
+ if proxy_url:
+ self.http_obj = http.ClosingProxyHttp(
+ proxy_url,
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ timeout=http_timeout)
+ else:
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ timeout=http_timeout)
def get_headers(self, accept_type=None, send_type=None):
"""Return the default headers which will be used with outgoing requests
@@ -371,7 +380,7 @@
on the endpoint in the catalog will return a list of supported API
versions.
- :return tuple with response headers and list of version numbers
+ :return: tuple with response headers and list of version numbers
:rtype: tuple
"""
resp, body = self.get('')
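A hedged sketch of threading the new parameter through a client; auth_provider stands for an already-built AuthProvider instance:

    from tempest.lib.common import rest_client

    client = rest_client.RestClient(
        auth_provider, 'compute', 'RegionOne',
        proxy_url='http://proxy.example.com:3128')
    # All requests issued by this client now go through ClosingProxyHttp.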
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index a0941ef..c5df590 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -18,9 +18,6 @@
import string
import uuid
-from debtcollector import removals
-import netaddr
-from oslo_utils import netutils
from oslo_utils import uuidutils
import six.moves
@@ -177,36 +174,6 @@
for i in range(size)])
-@removals.remove(
- message="use get_ipv6_addr_by_EUI64 from oslo_utils.netutils",
- version="Newton",
- removal_version="Ocata")
-def get_ipv6_addr_by_EUI64(cidr, mac):
- """Generate a IPv6 addr by EUI-64 with CIDR and MAC
-
- :param str cidr: a IPv6 CIDR
- :param str mac: a MAC address
- :return: an IPv6 Address
- :rtype: netaddr.IPAddress
- """
- # Check if the prefix is IPv4 address
- is_ipv4 = netutils.is_valid_ipv4(cidr)
- if is_ipv4:
- msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
- raise TypeError(msg)
- try:
- eui64 = int(netaddr.EUI(mac).eui64())
- prefix = netaddr.IPNetwork(cidr)
- return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
- except (ValueError, netaddr.AddrFormatError):
- raise TypeError('Bad prefix or mac format for generating IPv6 '
- 'address by EUI-64: %(prefix)s, %(mac)s:'
- % {'prefix': cidr, 'mac': mac})
- except TypeError:
- raise TypeError('Bad prefix type for generate IPv6 address by '
- 'EUI-64: %s' % cidr)
-
-
# Courtesy of http://stackoverflow.com/a/312464
def chunkify(sequence, chunksize):
"""Yield successive chunks from `sequence`."""
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index aef2ff3..cd4092b 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -67,7 +67,7 @@
def __init__(self, ip_address, username, password=None, pkey=None,
server=None, servers_client=None, ssh_timeout=300,
connect_timeout=60, console_output_enabled=True,
- ssh_shell_prologue="set -eu -o pipefail; PATH=$$PATH:/sbin;",
+ ssh_shell_prologue="set -eu -o pipefail; PATH=$PATH:/sbin;",
ping_count=1, ping_size=56):
"""Executes commands in a VM over ssh
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index bd0db7c..c2e93ee 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -86,22 +86,29 @@
pass
-def call_until_true(func, duration, sleep_for):
+def call_until_true(func, duration, sleep_for, *args, **kwargs):
"""Call the given function until it returns True (and return True)
or until the specified duration (in seconds) elapses (and return False).
- :param func: A zero argument callable that returns True on success.
+ :param func: A callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
+ :param args: args that are passed to func.
+ :param kwargs: kwargs that are passed to func.
"""
now = time.time()
+ begin_time = now
timeout = now + duration
while now < timeout:
- if func():
+ if func(*args, **kwargs):
+ LOG.debug("Call %s returns true in %f seconds",
+ getattr(func, '__name__'), time.time() - begin_time)
return True
time.sleep(sleep_for)
now = time.time()
+ LOG.debug("Call %s returns false in %f seconds",
+ getattr(func, '__name__'), duration)
return False
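A short example of the extended signature; the predicate and its arguments are illustrative:

    from tempest.lib.common.utils import test_utils

    def server_is_active(client, server_id):
        return client.show_server(server_id)['server']['status'] == 'ACTIVE'

    # Extra positional arguments are now forwarded to the predicate on every
    # attempt (60 s budget, 2 s between attempts).
    test_utils.call_until_true(server_is_active, 60, 2,
                               servers_client, server_id)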
diff --git a/tempest/lib/common/validation_resources.py b/tempest/lib/common/validation_resources.py
new file mode 100644
index 0000000..c35a01a
--- /dev/null
+++ b/tempest/lib/common/validation_resources.py
@@ -0,0 +1,457 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2017 IBM Corp.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
+
+LOG = logging.getLogger(__name__)
+
+
+def _network_service(clients, use_neutron):
+ # Internal helper to select the right network clients
+ if use_neutron:
+ return clients.network
+ else:
+ return clients.compute
+
+
+def create_ssh_security_group(clients, add_rule=False, ethertype='IPv4',
+ use_neutron=True):
+ """Create a security group for ping/ssh testing
+
+ Create a security group to be attached to a VM using the nova or neutron
+ clients. If rules are added, the group can be attached to a VM to enable
+ connectivity validation over ICMP and further testing over SSH.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param add_rule: Whether security group rules are provisioned or not.
+ Defaults to `False`.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+ :returns: A dictionary with the security group as returned by the API.
+
+ Examples::
+
+ from tempest.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Security group for IPv4 tests
+ sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
+ # Security group for IPv6 tests
+ sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
+ add_rule=True)
+ """
+ network_service = _network_service(clients, use_neutron)
+ security_groups_client = network_service.SecurityGroupsClient()
+ security_group_rules_client = network_service.SecurityGroupRulesClient()
+ # Security Group clients for nova and neutron behave the same
+ sg_name = data_utils.rand_name('securitygroup-')
+ sg_description = data_utils.rand_name('description-')
+ security_group = security_groups_client.create_security_group(
+ name=sg_name, description=sg_description)['security_group']
+ # Security Group Rules clients require different parameters depending on
+ # the network service in use
+ if add_rule:
+ try:
+ if use_neutron:
+ security_group_rules_client.create_security_group_rule(
+ security_group_id=security_group['id'],
+ protocol='tcp',
+ ethertype=ethertype,
+ port_range_min=22,
+ port_range_max=22,
+ direction='ingress')
+ security_group_rules_client.create_security_group_rule(
+ security_group_id=security_group['id'],
+ protocol='icmp',
+ ethertype=ethertype,
+ direction='ingress')
+ else:
+ security_group_rules_client.create_security_group_rule(
+ parent_group_id=security_group['id'], ip_protocol='tcp',
+ from_port=22, to_port=22)
+ security_group_rules_client.create_security_group_rule(
+ parent_group_id=security_group['id'], ip_protocol='icmp',
+ from_port=-1, to_port=-1)
+ except Exception as sgc_exc:
+ # If adding security group rules fails, we cleanup the SG before
+ # re-raising the failure up
+ with excutils.save_and_reraise_exception():
+ try:
+ msg = ('Error while provisioning security group rules in '
+ 'security group %s. Trying to cleanup.')
+ # The exceptions logging is already handled, so using
+ # debug here just to provide more context
+ LOG.debug(msg, sgc_exc)
+ clear_validation_resources(
+ clients, keypair=None, floating_ip=None,
+ security_group=security_group,
+ use_neutron=use_neutron)
+ except Exception as cleanup_exc:
+ msg = ('Error during cleanup of a security group. '
+ 'The cleanup was triggered by an exception during '
+ 'the provisioning of security group rules.\n'
+ 'Provisioning exception: %s\n'
+ 'First cleanup exception: %s')
+ LOG.exception(msg, sgc_exc, cleanup_exc)
+ LOG.debug("SSH Validation resource security group with tcp and icmp "
+ "rules %s created", sg_name)
+ return security_group
+
+
+def create_validation_resources(clients, keypair=False, floating_ip=False,
+ security_group=False,
+ security_group_rules=False,
+ ethertype='IPv4', use_neutron=True,
+ floating_network_id=None,
+ floating_network_name=None):
+ """Provision resources for VM ping/ssh testing
+
+ Create resources required to be able to ping / ssh a virtual machine:
+ keypair, security group, security group rules and a floating IP.
+ Which of those resources are required may depend on the cloud setup and on
+ the specific test and it can be controlled via the corresponding
+ arguments.
+
+ Provisioned resources are returned in a dictionary.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: Whether to provision a keypair. Defaults to False.
+ :param floating_ip: Whether to provision a floating IP. Defaults to False.
+ :param security_group: Whether to provision a security group. Defaults to
+ False.
+ :param security_group_rules: Whether to provision security group rules.
+ Defaults to False.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+ :param floating_network_id: The id of the network used to provision a
+ floating IP. Only used if a floating IP is requested and with neutron.
+ :param floating_network_name: The name of the floating IP pool used to
+ provision the floating IP. Only used if a floating IP is requested and
+ with nova-net.
+ :returns: A dictionary with the resources in the format they are returned
+ by the API. Valid keys are 'keypair', 'floating_ip' and
+ 'security_group'.
+
+ Examples::
+
+ from tempest.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False, floating_ip=True)
+ resources = vr.create_validation_resources(
+ osclients, use_neutron=True,
+ floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
+ **resources)
+
+ # The floating IP to be attached to the VM
+ floating_ip = resources['floating_ip']['ip']
+ """
+ # Create and Return the validation resources required to validate a VM
+ msg = ('Requested validation resources keypair %s, floating IP %s, '
+ 'security group %s')
+ LOG.debug(msg, keypair, floating_ip, security_group)
+ validation_data = {}
+ try:
+ if keypair:
+ keypair_name = data_utils.rand_name('keypair')
+ validation_data.update(
+ clients.compute.KeyPairsClient().create_keypair(
+ name=keypair_name))
+ LOG.debug("Validation resource key %s created", keypair_name)
+ if security_group:
+ validation_data['security_group'] = create_ssh_security_group(
+ clients, add_rule=security_group_rules,
+ use_neutron=use_neutron, ethertype=ethertype)
+ if floating_ip:
+ floating_ip_client = _network_service(
+ clients, use_neutron).FloatingIPsClient()
+ if use_neutron:
+ floatingip = floating_ip_client.create_floatingip(
+ floating_network_id=floating_network_id)
+ # validation_resources['floating_ip'] has historically looked
+ # like a compute API POST /os-floating-ips response, so we need
+ # to mangle it a bit for a Neutron response with different
+ # fields.
+ validation_data['floating_ip'] = floatingip['floatingip']
+ validation_data['floating_ip']['ip'] = (
+ floatingip['floatingip']['floating_ip_address'])
+ else:
+ # NOTE(mriedem): The os-floating-ips compute API was deprecated
+ # in the 2.36 microversion. Any tests for CRUD operations on
+ # floating IPs using the compute API should be capped at 2.35.
+ validation_data.update(floating_ip_client.create_floating_ip(
+ pool=floating_network_name))
+ LOG.debug("Validation resource floating IP %s created",
+ validation_data['floating_ip'])
+ except Exception as prov_exc:
+ # If something goes wrong, cleanup as much as possible before we
+ # re-raise the exception
+ with excutils.save_and_reraise_exception():
+ if validation_data:
+ # Cleanup may fail as well
+ try:
+ msg = ('Error while provisioning validation resources %s. '
+ 'Trying to cleanup what we provisioned so far: %s')
+ # The exceptions logging is already handled, so using
+ # debug here just to provide more context
+ LOG.debug(msg, prov_exc, str(validation_data))
+ clear_validation_resources(
+ clients,
+ keypair=validation_data.get('keypair', None),
+ floating_ip=validation_data.get('floating_ip', None),
+ security_group=validation_data.get('security_group',
+ None),
+ use_neutron=use_neutron)
+ except Exception as cleanup_exc:
+ msg = ('Error during cleanup of validation resources. '
+ 'The cleanup was triggered by an exception during '
+ 'the provisioning step.\n'
+ 'Provisioning exception: %s\n'
+ 'First cleanup exception: %s')
+ LOG.exception(msg, prov_exc, cleanup_exc)
+ return validation_data
+
+
+def clear_validation_resources(clients, keypair=None, floating_ip=None,
+ security_group=None, use_neutron=True):
+ """Cleanup resources for VM ping/ssh testing
+
+ Cleanup a set of resources provisioned via `create_validation_resources`.
+ In case of errors during cleanup, the exception is logged and the cleanup
+ process is continued. The first exception that was raised is re-raised
+ after the cleanup is complete.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: A dictionary with the keypair to be deleted. Defaults to
+ None.
+ :param floating_ip: A dictionary with the floating_ip to be deleted.
+ Defaults to None.
+ :param security_group: A dictionary with the security_group to be deleted.
+ Defaults to None.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+
+ Examples::
+
+ from tempest.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False, floating_ip=True)
+ resources = vr.create_validation_resources(
+ osclients, validation_resources=resources, use_neutron=True,
+ floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C')
+
+ # Now cleanup the resources
+ try:
+ vr.clear_validation_resources(osclients, use_neutron=True,
+ **resources)
+ except Exception as e:
+ LOG.exception('Something went wrong during cleanup, ignoring')
+ """
+ has_exception = None
+ if keypair:
+ keypair_client = clients.compute.KeyPairsClient()
+ keypair_name = keypair['name']
+ try:
+ keypair_client.delete_keypair(keypair_name)
+ except lib_exc.NotFound:
+ LOG.warning(
+ "Keypair %s is not found when attempting to delete",
+ keypair_name
+ )
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting key %s',
+ keypair_name)
+ if not has_exception:
+ has_exception = exc
+ network_service = _network_service(clients, use_neutron)
+ if security_group:
+ security_group_client = network_service.SecurityGroupsClient()
+ sec_id = security_group['id']
+ try:
+ security_group_client.delete_security_group(sec_id)
+ security_group_client.wait_for_resource_deletion(sec_id)
+ except lib_exc.NotFound:
+ LOG.warning("Security group %s is not found when attempting "
+ "to delete", sec_id)
+ except lib_exc.Conflict as exc:
+ LOG.exception('Conflict while deleting security '
+ 'group %s VM might not be deleted', sec_id)
+ if not has_exception:
+ has_exception = exc
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting security '
+ 'group %s', sec_id)
+ if not has_exception:
+ has_exception = exc
+ if floating_ip:
+ floating_ip_client = network_service.FloatingIPsClient()
+ fip_id = floating_ip['id']
+ try:
+ if use_neutron:
+ floating_ip_client.delete_floatingip(fip_id)
+ else:
+ floating_ip_client.delete_floating_ip(fip_id)
+ except lib_exc.NotFound:
+ LOG.warning('Floating ip %s not found while attempting to '
+ 'delete', fip_id)
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting ip %s', fip_id)
+ if not has_exception:
+ has_exception = exc
+ if has_exception:
+ raise has_exception
+
+
+class ValidationResourcesFixture(fixtures.Fixture):
+ """Fixture to provision and cleanup validation resources"""
+
+ DICT_KEYS = ['keypair', 'security_group', 'floating_ip']
+
+ def __init__(self, clients, keypair=False, floating_ip=False,
+ security_group=False, security_group_rules=False,
+ ethertype='IPv4', use_neutron=True, floating_network_id=None,
+ floating_network_name=None):
+ """Create a ValidationResourcesFixture
+
+ Create a ValidationResourcesFixture, which provisions the
+ resources required to be able to ping / ssh a virtual machine upon
+ setUp and clears them out upon cleanup. Resources are keypair,
+ security group, security group rules and a floating IP - depending
+ on the params.
+
+ The fixture exposes a dictionary that includes provisioned resources.
+
+ :param clients: `tempest.lib.services.clients.ServiceClients` or of a
+ subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: Whether to provision a keypair. Defaults to False.
+ :param floating_ip: Whether to provision a floating IP.
+ Defaults to False.
+ :param security_group: Whether to provision a security group.
+ Defaults to False.
+ :param security_group_rules: Whether to provision security group rules.
+ Defaults to False.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only if neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron,
+ when False resources are provisioned via nova.
+ :param floating_network_id: The id of the network used to provision a
+ floating IP. Only used if a floating IP is requested and neutron
+ is used.
+ :param floating_network_name: The name of the floating IP pool used to
+ provision the floating IP. Only used if a floating IP is requested
+ and with nova-net.
+ :returns: A dictionary with the same keys as the input
+ `validation_resources` and the resources for values in the format
+ they are returned by the API.
+
+ Examples::
+
+ from tempest.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+ import testtools
+
+
+ class TestWithVR(testtools.TestCase):
+
+ def setUp(self):
+ creds = auth.get_credentials(
+ 'http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+
+ osclients = clients.ServiceClients(
+ creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False,
+ floating_ip=True)
+ network_id = '4240E68E-23DA-4C82-AC34-9FEFAA24521C'
+ self.vr = self.useFixture(vr.ValidationResourcesFixture(
+ osclients, use_neutron=True,
+ floating_network_id=network_id,
+ **resources))
+
+ def test_use_ip(self):
+ # The floating IP to be attached to the VM
+ floating_ip = self.vr.resources['floating_ip']['ip']
+ """
+ self._clients = clients
+ self._keypair = keypair
+ self._floating_ip = floating_ip
+ self._security_group = security_group
+ self._security_group_rules = security_group_rules
+ self._ethertype = ethertype
+ self._use_neutron = use_neutron
+ self._floating_network_id = floating_network_id
+ self._floating_network_name = floating_network_name
+ self._validation_resources = None
+
+ def _setUp(self):
+ msg = ('Requested setup of ValidationResources keypair %s, floating '
+ 'IP %s, security group %s')
+ LOG.debug(msg, self._keypair, self._floating_ip, self._security_group)
+ self._validation_resources = create_validation_resources(
+ self._clients, keypair=self._keypair,
+ floating_ip=self._floating_ip,
+ security_group=self._security_group,
+ security_group_rules=self._security_group_rules,
+ ethertype=self._ethertype, use_neutron=self._use_neutron,
+ floating_network_id=self._floating_network_id,
+ floating_network_name=self._floating_network_name)
+ # If provisioning raises an exception we won't have anything to
+ # cleanup here, so we don't need a try-finally around provisioning
+ vr = self._validation_resources
+ self.addCleanup(clear_validation_resources, self._clients,
+ keypair=vr.get('keypair', None),
+ floating_ip=vr.get('floating_ip', None),
+ security_group=vr.get('security_group', None),
+ use_neutron=self._use_neutron)
+
+ @property
+ def resources(self):
+ return self._validation_resources
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index f82f707..e99dd24 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -15,7 +15,6 @@
import functools
import uuid
-import debtcollector.removals
from oslo_log import log as logging
import six
import testtools
@@ -31,7 +30,7 @@
"""
def decorator(f):
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
skip = False
if "condition" in kwargs:
if kwargs["condition"] is True:
@@ -43,7 +42,7 @@
raise ValueError('bug must be a valid bug number')
msg = "Skipped until Bug: %s is resolved." % kwargs["bug"]
raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
return wrapper
return decorator
@@ -56,9 +55,9 @@
"""
def decorator(f):
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
try:
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
except Exception as exc:
exc_status_code = getattr(exc, 'status_code', None)
if status_code is None or status_code == exc_status_code:
@@ -87,25 +86,6 @@
return decorator
-@debtcollector.removals.remove(removal_version='Queen')
-class skip_unless_attr(object):
- """Decorator to skip tests if a specified attr does not exists or False"""
- def __init__(self, attr, msg=None):
- self.attr = attr
- self.message = msg or ("Test case attribute %s not found "
- "or False") % attr
-
- def __call__(self, func):
- @functools.wraps(func)
- def _skipper(*args, **kw):
- """Wrapped skipper function."""
- testobj = args[0]
- if not getattr(testobj, self.attr, False):
- raise testtools.TestCase.skipException(self.message)
- func(*args, **kw)
- return _skipper
-
-
def attr(**kwargs):
"""A decorator which applies the testtools attr decorator
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 68ce57a..13af890 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -44,6 +44,9 @@
def __str__(self):
return self._error_string
+ def __repr__(self):
+ return self._error_string
+
class RestClientException(TempestException,
testtools.TestCase.failureException):
@@ -93,7 +96,7 @@
class Conflict(ClientRestClientException):
status_code = 409
- message = "An object with that identifier already exists"
+ message = "Conflict with state of target resource"
class Gone(ClientRestClientException):
@@ -269,3 +272,11 @@
class DeleteErrorException(TempestException):
message = ("Resource %(resource_id)s failed to delete "
"and is in ERROR status")
+
+
+class InvalidTestResource(TempestException):
+ message = "%(name)s is not a valid %(type)s, or the name is ambiguous"
+
+
+class InvalidParam(TempestException):
+ message = ("Invalid Parameter passed: %(invalid_param)s")
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index cd3bab0..8918a8c 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -17,10 +17,12 @@
import copy
import importlib
import inspect
+import sys
import warnings
from debtcollector import removals
from oslo_log import log as logging
+import testtools
from tempest.lib import auth
from tempest.lib.common.utils import misc
@@ -29,6 +31,7 @@
from tempest.lib.services import identity
from tempest.lib.services import image
from tempest.lib.services import network
+from tempest.lib.services import object_storage
from tempest.lib.services import volume
warnings.simplefilter("once")
@@ -48,20 +51,13 @@
'image.v1': image.v1,
'image.v2': image.v2,
'network': network,
+ 'object-storage': object_storage,
'volume.v1': volume.v1,
'volume.v2': volume.v2,
'volume.v3': volume.v3
}
-def _tempest_internal_modules():
- # Set of unstable service clients available in Tempest
- # NOTE(andreaf) This list will exists only as long the remain clients
- # are migrated to tempest.lib, and it will then be deleted without
- # deprecation or advance notice
- return set(['object-storage'])
-
-
def available_modules():
"""Set of service client modules available in Tempest and plugins
@@ -85,6 +81,7 @@
extra_service_versions = set([])
_tempest_modules = set(tempest_modules())
plugin_services = ClientsRegistry().get_service_clients()
+ name_conflicts = []
for plugin_name in plugin_services:
plug_service_versions = set([x['service_version'] for x in
plugin_services[plugin_name]])
@@ -96,20 +93,14 @@
'claimed by another one' % (plugin_name,
extra_service_versions &
plug_service_versions))
- raise exceptions.PluginRegistrationException(
- name=plugin_name, detailed_error=detailed_error)
- # NOTE(andreaf) Once all tempest clients are stable, the following
- # if will have to be removed.
- if not plug_service_versions.isdisjoint(
- _tempest_internal_modules()):
- detailed_error = (
- 'Plugin %s is trying to register a service %s already '
- 'claimed by a Tempest one' % (plugin_name,
- _tempest_internal_modules() &
- plug_service_versions))
- raise exceptions.PluginRegistrationException(
- name=plugin_name, detailed_error=detailed_error)
+ name_conflicts.append(exceptions.PluginRegistrationException(
+ name=plugin_name, detailed_error=detailed_error))
extra_service_versions |= plug_service_versions
+ if name_conflicts:
+ LOG.error(
+ 'Failed to list available modules due to name conflicts: %s',
+ name_conflicts)
+ raise testtools.MultipleExceptions(*name_conflicts)
return _tempest_modules | extra_service_versions
@@ -161,7 +152,7 @@
:param kwargs: Parameters to be passed to all clients. Parameters
values can be overwritten when clients are initialised, but
parameters cannot be deleted.
- :raise ImportError if the specified module_path cannot be imported
+ :raise ImportError: if the specified module_path cannot be imported
Example::
@@ -268,7 +259,7 @@
@removals.removed_kwarg('client_parameters')
def __init__(self, credentials, identity_uri, region=None, scope='project',
disable_ssl_certificate_validation=True, ca_certs=None,
- trace_requests='', client_parameters=None):
+ trace_requests='', client_parameters=None, proxy_url=None):
"""Service Clients provider
Instantiate a `ServiceClients` object, from a set of credentials and an
@@ -328,6 +319,8 @@
name, as declared in `service_clients.available_modules()` except
for the version. Values are dictionaries of parameters that are
going to be passed to all clients in the service client module.
+ :param proxy_url: Applies to auth and to all service clients, set a
+ proxy url for the clients to use.
"""
self._registered_services = set([])
self.credentials = credentials
@@ -352,16 +345,20 @@
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
self.trace_requests = trace_requests
+ self.proxy_url = proxy_url
# Creates an auth provider for the credentials
self.auth_provider = auth_provider_class(
self.credentials, self.identity_uri, scope=scope,
disable_ssl_certificate_validation=self.dscv,
- ca_certs=self.ca_certs, trace_requests=self.trace_requests)
+ ca_certs=self.ca_certs, trace_requests=self.trace_requests,
+ proxy_url=proxy_url)
+
# Setup some defaults for client parameters of registered services
client_parameters = client_parameters or {}
self.parameters = {}
+
# Parameters are provided for unversioned services
- all_modules = available_modules() | _tempest_internal_modules()
+ all_modules = available_modules()
unversioned_services = set(
[x.split('.')[0] for x in all_modules])
for service in unversioned_services:
@@ -375,6 +372,7 @@
# Register service clients from the registry (__tempest__ and plugins)
clients_registry = ClientsRegistry()
plugin_service_clients = clients_registry.get_service_clients()
+ registration_errors = []
for plugin in plugin_service_clients:
service_clients = plugin_service_clients[plugin]
# Each plugin returns a list of service client parameters
@@ -385,10 +383,12 @@
try:
self.register_service_client_module(**service_client)
except Exception:
+ registration_errors.append(sys.exc_info())
LOG.exception(
'Failed to register service client from plugin %s '
'with parameters %s', plugin, service_client)
- raise
+ if registration_errors:
+ raise testtools.MultipleExceptions(*registration_errors)
def register_service_client_module(self, name, service_version,
module_path, client_names, **kwargs):
@@ -409,8 +409,8 @@
clients in tempest.
:param client_names: List or set of names of service client classes.
:param kwargs: Extra optional parameters to be passed to all clients.
- ServiceClient provides defaults for region, dscv, ca_certs and
- trace_requests.
+ ServiceClient provides defaults for region, dscv, ca_certs, http
+ proxies and trace_requests.
:raise ServiceClientRegistrationException: if the provided name is
already in use or if service_version is already registered.
:raise ImportError: if module_path cannot be imported.
@@ -431,7 +431,8 @@
params = dict(region=self.region,
disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs,
- trace_requests=self.trace_requests)
+ trace_requests=self.trace_requests,
+ proxy_url=self.proxy_url)
params.update(kwargs)
# Instantiate the client factory
_factory = ClientsFactory(module_path=module_path,
@@ -445,9 +446,7 @@
@property
def registered_services(self):
- # NOTE(andreaf) Once all tempest modules are stable this needs to
- # be updated to remove _tempest_internal_modules
- return self._registered_services | _tempest_internal_modules()
+ return self._registered_services
def _setup_parameters(self, parameters):
"""Setup default values for client parameters
diff --git a/tempest/lib/services/compute/services_client.py b/tempest/lib/services/compute/services_client.py
index 77ac82f..b046c35 100644
--- a/tempest/lib/services/compute/services_client.py
+++ b/tempest/lib/services/compute/services_client.py
@@ -18,12 +18,18 @@
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import services as schema
+from tempest.lib.api_schema.response.compute.v2_11 import services \
+ as schemav211
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class ServicesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.10', 'schema': schema},
+ {'min': '2.11', 'max': None, 'schema': schemav211}]
+
def list_services(self, **params):
"""Lists all running Compute services for a tenant.
@@ -37,7 +43,8 @@
resp, body = self.get(url)
body = json.loads(body)
- self.validate_response(schema.list_services, resp, body)
+ _schema = self.get_schema(self.schema_versions_info)
+ self.validate_response(_schema.list_services, resp, body)
return rest_client.ResponseBody(resp, body)
def enable_service(self, **kwargs):
@@ -65,3 +72,31 @@
body = json.loads(body)
self.validate_response(schema.enable_disable_service, resp, body)
return rest_client.ResponseBody(resp, body)
+
+ def disable_log_reason(self, **kwargs):
+ """Disables scheduling for a Compute service and logs reason.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/compute/#disable-scheduling-for-a-compute-service-and-log-disabled-reason
+ """
+ post_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable-log-reason', post_body)
+ body = json.loads(body)
+ self.validate_response(schema.disable_log_reason, resp, body)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_forced_down(self, **kwargs):
+ """Set or unset ``forced_down`` flag for the service.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/compute/#update-forced-down
+ """
+ post_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/force-down', post_body)
+ body = json.loads(body)
+ # NOTE: Use schemav211.update_forced_down directly because there is no
+ # update_forced_down schema for <2.11.
+ self.validate_response(schemav211.update_forced_down, resp, body)
+ return rest_client.ResponseBody(resp, body)
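The schema for ``list_services`` is now selected by microversion, and the two new calls wrap additional ``os-services`` actions. A sketch, assuming ``client`` is an authenticated ServicesClient, the host/binary values are illustrative, and the request microversion has been raised to at least 2.11 for the forced-down call (for instance via ``base_compute_client.COMPUTE_MICROVERSION``, as Tempest's fixtures do)::

    # Disable scheduling for a service and record the reason
    client.disable_log_reason(host='compute-1', binary='nova-compute',
                              disabled_reason='maintenance window')

    # Force the service down; validated against the 2.11 response schema
    client.update_forced_down(host='compute-1', binary='nova-compute',
                              forced_down=True)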
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index e271a58..a539d08 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations under
# the License.
+from tempest.lib.services.identity.v3.catalog_client import \
+ CatalogClient
from tempest.lib.services.identity.v3.credentials_client import \
CredentialsClient
from tempest.lib.services.identity.v3.domain_configuration_client \
@@ -42,10 +44,11 @@
from tempest.lib.services.identity.v3.users_client import UsersClient
from tempest.lib.services.identity.v3.versions_client import VersionsClient
-__all__ = ['CredentialsClient', 'DomainsClient', 'DomainConfigurationClient',
- 'EndPointGroupsClient', 'EndPointsClient', 'EndPointsFilterClient',
- 'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
- 'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
- 'ProjectsClient', 'RegionsClient', 'RoleAssignmentsClient',
- 'RolesClient', 'ServicesClient', 'V3TokenClient', 'TrustsClient',
- 'UsersClient', 'VersionsClient']
+__all__ = ['CatalogClient', 'CredentialsClient', 'DomainsClient',
+ 'DomainConfigurationClient', 'EndPointGroupsClient',
+ 'EndPointsClient', 'EndPointsFilterClient', 'GroupsClient',
+ 'IdentityClient', 'InheritedRolesClient', 'OAUTHConsumerClient',
+ 'OAUTHTokenClient', 'PoliciesClient', 'ProjectsClient',
+ 'RegionsClient', 'RoleAssignmentsClient', 'RolesClient',
+ 'ServicesClient', 'V3TokenClient', 'TrustsClient', 'UsersClient',
+ 'VersionsClient']
diff --git a/tempest/services/object_storage/capabilities_client.py b/tempest/lib/services/identity/v3/catalog_client.py
similarity index 71%
copy from tempest/services/object_storage/capabilities_client.py
copy to tempest/lib/services/identity/v3/catalog_client.py
index 0fe437f..232b85a 100644
--- a/tempest/services/object_storage/capabilities_client.py
+++ b/tempest/lib/services/identity/v3/catalog_client.py
@@ -1,6 +1,3 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,19 +10,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""
+https://developer.openstack.org/api-ref/identity/v3/index.html#get-service-catalog
+"""
+
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
-class CapabilitiesClient(rest_client.RestClient):
+class CatalogClient(rest_client.RestClient):
+ api_version = "v3"
- def list_capabilities(self):
- self.skip_path()
- try:
- resp, body = self.get('info')
- finally:
- self.reset_path()
- body = json.loads(body)
+ def show_catalog(self):
+ resp, body = self.get('auth/catalog')
self.expected_success(200, resp.status)
- return resp, body
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
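The new CatalogClient issues a plain ``GET /v3/auth/catalog`` for the token in use. A sketch, assuming ``catalog_client`` is an already-authenticated instance::

    body = catalog_client.show_catalog()
    for entry in body['catalog']:
        # Each entry carries the service type, name and its endpoints
        print(entry['type'], entry['name'], len(entry['endpoints']))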
diff --git a/tempest/lib/services/identity/v3/endpoint_groups_client.py b/tempest/lib/services/identity/v3/endpoint_groups_client.py
index 723aeaa..ce99389 100644
--- a/tempest/lib/services/identity/v3/endpoint_groups_client.py
+++ b/tempest/lib/services/identity/v3/endpoint_groups_client.py
@@ -1,78 +1,78 @@
-# Copyright 2017 AT&T Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class EndPointGroupsClient(rest_client.RestClient):
- api_version = "v3"
-
- def create_endpoint_group(self, **kwargs):
- """Create endpoint group.
-
- For a full list of available parameters, please refer to the
- official API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/#create-endpoint-group
- """
- post_body = json.dumps({'endpoint_group': kwargs})
- resp, body = self.post('OS-EP-FILTER/endpoint_groups', post_body)
- self.expected_success(201, resp.status)
- body = json.loads(body)
- return rest_client.ResponseBody(resp, body)
-
- def update_endpoint_group(self, endpoint_group_id, **kwargs):
- """Update endpoint group.
-
- For a full list of available parameters, please refer to the
- official API reference:
- https://developer.openstack.org/api-ref/identity/v3-ext/#update-endpoint-group
- """
- post_body = json.dumps({'endpoint_group': kwargs})
- resp, body = self.patch(
- 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id, post_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return rest_client.ResponseBody(resp, body)
-
- def delete_endpoint_group(self, endpoint_group_id):
- """Delete endpoint group."""
- resp_header, resp_body = self.delete(
- 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
- self.expected_success(204, resp_header.status)
- return rest_client.ResponseBody(resp_header, resp_body)
-
- def show_endpoint_group(self, endpoint_group_id):
- """Get endpoint group."""
- resp_header, resp_body = self.get(
- 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
- self.expected_success(200, resp_header.status)
- resp_body = json.loads(resp_body)
- return rest_client.ResponseBody(resp_header, resp_body)
-
- def check_endpoint_group(self, endpoint_group_id):
- """Check endpoint group."""
- resp_header, resp_body = self.head(
- 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
- self.expected_success(200, resp_header.status)
- return rest_client.ResponseBody(resp_header, resp_body)
-
- def list_endpoint_groups(self):
- """Get endpoint groups."""
- resp_header, resp_body = self.get('OS-EP-FILTER/endpoint_groups')
- self.expected_success(200, resp_header.status)
- resp_body = json.loads(resp_body)
- return rest_client.ResponseBody(resp_header, resp_body)
+# Copyright 2017 AT&T Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class EndPointGroupsClient(rest_client.RestClient):
+ api_version = "v3"
+
+ def create_endpoint_group(self, **kwargs):
+ """Create endpoint group.
+
+ For a full list of available parameters, please refer to the
+ official API reference:
+ https://developer.openstack.org/api-ref/identity/v3-ext/#create-endpoint-group
+ """
+ post_body = json.dumps({'endpoint_group': kwargs})
+ resp, body = self.post('OS-EP-FILTER/endpoint_groups', post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_endpoint_group(self, endpoint_group_id, **kwargs):
+ """Update endpoint group.
+
+ For a full list of available parameters, please refer to the
+ official API reference:
+ https://developer.openstack.org/api-ref/identity/v3-ext/#update-endpoint-group
+ """
+ post_body = json.dumps({'endpoint_group': kwargs})
+ resp, body = self.patch(
+ 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_endpoint_group(self, endpoint_group_id):
+ """Delete endpoint group."""
+ resp_header, resp_body = self.delete(
+ 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
+ self.expected_success(204, resp_header.status)
+ return rest_client.ResponseBody(resp_header, resp_body)
+
+ def show_endpoint_group(self, endpoint_group_id):
+ """Get endpoint group."""
+ resp_header, resp_body = self.get(
+ 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
+ self.expected_success(200, resp_header.status)
+ resp_body = json.loads(resp_body)
+ return rest_client.ResponseBody(resp_header, resp_body)
+
+ def check_endpoint_group(self, endpoint_group_id):
+ """Check endpoint group."""
+ resp_header, resp_body = self.head(
+ 'OS-EP-FILTER/endpoint_groups/%s' % endpoint_group_id)
+ self.expected_success(200, resp_header.status)
+ return rest_client.ResponseBody(resp_header, resp_body)
+
+ def list_endpoint_groups(self):
+ """Get endpoint groups."""
+ resp_header, resp_body = self.get('OS-EP-FILTER/endpoint_groups')
+ self.expected_success(200, resp_header.status)
+ resp_body = json.loads(resp_body)
+ return rest_client.ResponseBody(resp_header, resp_body)
diff --git a/tempest/lib/services/identity/v3/identity_client.py b/tempest/lib/services/identity/v3/identity_client.py
index 755c14b..ad770bf 100644
--- a/tempest/lib/services/identity/v3/identity_client.py
+++ b/tempest/lib/services/identity/v3/identity_client.py
@@ -44,9 +44,23 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
+ def check_token_existence(self, resp_token):
+ """Validates a token."""
+ headers = {'X-Subject-Token': resp_token}
+ resp, body = self.head("auth/tokens", headers=headers)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def list_auth_projects(self):
"""Get available project scopes."""
resp, body = self.get("auth/projects")
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+
+ def list_auth_domains(self):
+ """Get available domain scopes."""
+ resp, body = self.get("auth/domains")
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
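``check_token_existence`` validates a token via ``HEAD /v3/auth/tokens``, and ``list_auth_domains`` mirrors the existing ``list_auth_projects``. A sketch, assuming ``identity_client`` is an authenticated v3 IdentityClient and ``subject_token`` was obtained elsewhere::

    # Raises unless the HEAD returns 200, i.e. the token is valid
    identity_client.check_token_existence(subject_token)

    domains = identity_client.list_auth_domains()['domains']
    print([d['name'] for d in domains])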
diff --git a/tempest/lib/services/network/security_groups_client.py b/tempest/lib/services/network/security_groups_client.py
index 1f30216..d3ebf20 100644
--- a/tempest/lib/services/network/security_groups_client.py
+++ b/tempest/lib/services/network/security_groups_client.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.lib import exceptions as lib_exc
from tempest.lib.services.network import base
@@ -66,3 +67,10 @@
"""
uri = '/security-groups'
return self.list_resources(uri, **filters)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.show_security_group(id)
+ except lib_exc.NotFound:
+ return True
+ return False
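Implementing ``is_resource_deleted`` lets the generic ``wait_for_resource_deletion`` waiter inherited from ``rest_client.RestClient`` work for security groups, which the validation-resources cleanup above relies on. Sketch (``client`` and ``sg_id`` assumed)::

    client.delete_security_group(sg_id)
    # Polls is_resource_deleted() until the group is gone or a timeout hits
    client.wait_for_resource_deletion(sg_id)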
diff --git a/tempest/lib/services/network/versions_client.py b/tempest/lib/services/network/versions_client.py
index a9c3bbf..f87fe87 100644
--- a/tempest/lib/services/network/versions_client.py
+++ b/tempest/lib/services/network/versions_client.py
@@ -15,7 +15,6 @@
import time
from oslo_serialization import jsonutils as json
-from six.moves import urllib
from tempest.lib.services.network import base
@@ -25,9 +24,7 @@
def list_versions(self):
"""Do a GET / to fetch available API version information."""
- endpoint = self.base_url
- url = urllib.parse.urlparse(endpoint)
- version_url = '%s://%s/' % (url.scheme, url.netloc)
+ version_url = self._get_base_version_url()
# Note: we do a raw_request here because we want to use
# an unversioned URL, not "v2/$project_id/".
diff --git a/tempest/services/object_storage/__init__.py b/tempest/lib/services/object_storage/__init__.py
similarity index 67%
rename from tempest/services/object_storage/__init__.py
rename to tempest/lib/services/object_storage/__init__.py
index 1738566..4303d09 100644
--- a/tempest/services/object_storage/__init__.py
+++ b/tempest/lib/services/object_storage/__init__.py
@@ -12,13 +12,14 @@
# License for the specific language governing permissions and limitations under
# the License.
-from tempest.services.object_storage.account_client import AccountClient
-from tempest.services.object_storage.bulk_middleware_client import \
+from tempest.lib.services.object_storage.account_client import AccountClient
+from tempest.lib.services.object_storage.bulk_middleware_client import \
BulkMiddlewareClient
-from tempest.services.object_storage.capabilities_client import \
+from tempest.lib.services.object_storage.capabilities_client import \
CapabilitiesClient
-from tempest.services.object_storage.container_client import ContainerClient
-from tempest.services.object_storage.object_client import ObjectClient
+from tempest.lib.services.object_storage.container_client import \
+ ContainerClient
+from tempest.lib.services.object_storage.object_client import ObjectClient
__all__ = ['AccountClient', 'BulkMiddlewareClient', 'CapabilitiesClient',
'ContainerClient', 'ObjectClient']
diff --git a/tempest/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
similarity index 75%
rename from tempest/services/object_storage/account_client.py
rename to tempest/lib/services/object_storage/account_client.py
index 5a1737e..67f01a6 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -50,41 +50,20 @@
return resp, body
def list_account_metadata(self):
- """HEAD on the storage URL
-
- Returns all account metadata headers
- """
+ """List all account metadata."""
resp, body = self.head('')
self.expected_success(204, resp.status)
return resp, body
def list_account_containers(self, params=None):
- """GET on the (base) storage URL
+ """List all containers for the account.
Given valid X-Auth-Token, returns a list of all containers for the
account.
- Optional Arguments:
- limit=[integer value N]
- Limits the number of results to at most N values
- DEFAULT: 10,000
-
- marker=[string value X]
- Given string value X, return object names greater in value
- than the specified marker.
- DEFAULT: No Marker
-
- prefix=[string value Y]
- Given string value Y, return object names starting with that prefix
-
- reverse=[boolean value Z]
- Reverse the result order based on the boolean value Z
- DEFAULT: False
-
- format=[string value, either 'json' or 'xml']
- Specify either json or xml to return the respective serialized
- response.
- DEFAULT: Python-List returned in response body
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/object-store/#show-account-details-and-list-containers
"""
url = '?%s' % urllib.urlencode(params) if params else ''
diff --git a/tempest/services/object_storage/bulk_middleware_client.py b/tempest/lib/services/object_storage/bulk_middleware_client.py
similarity index 76%
rename from tempest/services/object_storage/bulk_middleware_client.py
rename to tempest/lib/services/object_storage/bulk_middleware_client.py
index c194ea9..c11a105 100644
--- a/tempest/services/object_storage/bulk_middleware_client.py
+++ b/tempest/lib/services/object_storage/bulk_middleware_client.py
@@ -24,39 +24,39 @@
To extract containers and objects on Swift cluster from
uploaded archived file. For More information please check:
- http://docs.openstack.org/developer/swift/middleware.html#module-swift.common.middleware.bulk
+ https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.bulk
"""
url = '%s?extract-archive=%s' % (upload_path, archive_file_format)
if headers is None:
headers = {}
resp, body = self.put(url, data, headers)
self.expected_success(200, resp.status)
- return resp, body
+ return rest_client.ResponseBodyData(resp, body)
def delete_bulk_data(self, data=None, headers=None):
"""Delete multiple objects or containers from their account.
For More information please check:
- http://docs.openstack.org/developer/swift/middleware.html#module-swift.common.middleware.bulk
+ https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.bulk
"""
url = '?bulk-delete'
if headers is None:
headers = {}
- resp, body = self.delete(url, headers=headers, body=data)
+ resp, body = self.delete(url, headers, data)
self.expected_success(200, resp.status)
- return resp, body
+ return rest_client.ResponseBodyData(resp, body)
def delete_bulk_data_with_post(self, data=None, headers=None):
"""Delete multiple objects or containers with POST request.
For More information please check:
- http://docs.openstack.org/developer/swift/middleware.html#module-swift.common.middleware.bulk
+ https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.bulk
"""
url = '?bulk-delete'
if headers is None:
headers = {}
- resp, body = self.post(url, headers=headers, body=data)
+ resp, body = self.post(url, data, headers)
self.expected_success([200, 204], resp.status)
- return resp, body
+ return rest_client.ResponseBodyData(resp, body)
diff --git a/tempest/services/object_storage/capabilities_client.py b/tempest/lib/services/object_storage/capabilities_client.py
similarity index 94%
rename from tempest/services/object_storage/capabilities_client.py
rename to tempest/lib/services/object_storage/capabilities_client.py
index 0fe437f..d31bbc2 100644
--- a/tempest/services/object_storage/capabilities_client.py
+++ b/tempest/lib/services/object_storage/capabilities_client.py
@@ -28,4 +28,4 @@
self.reset_path()
body = json.loads(body)
self.expected_success(200, resp.status)
- return resp, body
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
new file mode 100644
index 0000000..2da8e24
--- /dev/null
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -0,0 +1,124 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.etree import ElementTree as etree
+
+import debtcollector.moves
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class ContainerClient(rest_client.RestClient):
+
+ def update_container(self, container_name, **headers):
+ """Creates or Updates a container
+
+ with optional metadata passed in as a dictionary.
+ For a full list of allowed headers, please refer to the
+ official API reference:
+ https://developer.openstack.org/api-ref/object-store/#create-container
+ """
+ url = str(container_name)
+
+ resp, body = self.put(url, body=None, headers=headers)
+ self.expected_success([201, 202], resp.status)
+ return resp, body
+
+ # NOTE: This alias exists for usability: PUT can be used for both
+ # creating and updating a resource, and on the Swift container API it
+ # is mainly used for creating one.
+ create_container = update_container
+
+ def delete_container(self, container_name):
+ """Deletes the container (if it's empty)."""
+ url = str(container_name)
+ resp, body = self.delete(url)
+ self.expected_success(204, resp.status)
+ return resp, body
+
+ def create_update_or_delete_container_metadata(
+ self, container_name,
+ create_update_metadata=None,
+ delete_metadata=None,
+ create_update_metadata_prefix='X-Container-Meta-',
+ delete_metadata_prefix='X-Remove-Container-Meta-'):
+ """Creates, Updates or deletes an containter metadata entry.
+
+ Container Metadata can be created, updated or deleted based on
+ metadata header or value. For detailed info, please refer to the
+ official API reference:
+ https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-container-metadata
+ """
+ url = str(container_name)
+ headers = {}
+ if create_update_metadata:
+ for key in create_update_metadata:
+ metadata_header_name = create_update_metadata_prefix + key
+ headers[metadata_header_name] = create_update_metadata[key]
+ if delete_metadata:
+ for key in delete_metadata:
+ headers[delete_metadata_prefix + key] = delete_metadata[key]
+
+ resp, body = self.post(url, headers=headers, body=None)
+ self.expected_success(204, resp.status)
+ return resp, body
+
+ update_container_metadata = debtcollector.moves.moved_function(
+ create_update_or_delete_container_metadata,
+ 'update_container_metadata', __name__,
+ version='Queens', removal_version='Rocky')
+
+ def list_container_metadata(self, container_name):
+ """List all container metadata."""
+ url = str(container_name)
+ resp, body = self.head(url)
+ self.expected_success(204, resp.status)
+ return resp, body
+
+ def list_container_objects(self, container_name, params=None):
+ """List the objects in a container, given the container name
+
+ Returns the container object listing as a plain text list, or as
+ xml or json if that option is specified via the 'format' argument.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/object-storage/?expanded=show-container-details-and-list-objects-detail
+ """
+
+ url = str(container_name)
+ if params:
+ url += '?'
+ url += '&%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url, headers={})
+ if params and params.get('format') == 'json':
+ body = json.loads(body)
+ elif params and params.get('format') == 'xml':
+ body = etree.fromstring(body)
+ # Else the content-type is plain/text
+ else:
+ body = [
+ obj_name for obj_name in body.decode().split('\n') if obj_name
+ ]
+
+ self.expected_success([200, 204], resp.status)
+ return resp, body
+
+ list_container_contents = debtcollector.moves.moved_function(
+ list_container_objects, 'list_container_contents', __name__,
+ version='Queens', removal_version='Rocky')
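The listing call decodes the response body according to the requested ``format``. A sketch, assuming ``container_client`` is an authenticated ContainerClient::

    container_client.create_container('logs')

    # Default: a plain Python list of object names
    resp, names = container_client.list_container_objects('logs')

    # With format=json: a list of dicts (name, bytes, hash, ...)
    resp, objects = container_client.list_container_objects(
        'logs', params={'format': 'json'})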
diff --git a/tempest/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
similarity index 62%
rename from tempest/services/object_storage/object_client.py
rename to tempest/lib/services/object_storage/object_client.py
index 6d656ec..383aff6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -23,7 +23,8 @@
class ObjectClient(rest_client.RestClient):
def create_object(self, container, object_name, data,
- params=None, metadata=None, headers=None):
+ params=None, metadata=None, headers=None,
+ chunked=False):
"""Create storage object."""
if headers is None:
@@ -37,7 +38,7 @@
if params:
url += '?%s' % urlparse.urlencode(params)
- resp, body = self.put(url, data, headers)
+ resp, body = self.put(url, data, headers, chunked=chunked)
self.expected_success(201, resp.status)
return resp, body
@@ -50,28 +51,27 @@
self.expected_success([200, 204], resp.status)
return resp, body
- def update_object_metadata(self, container, object_name, metadata,
- metadata_prefix='X-Object-Meta-'):
+ def create_or_update_object_metadata(self, container, object_name,
+ headers=None):
"""Add, remove, or change X-Object-Meta metadata for storage object."""
- headers = {}
- for key in metadata:
- headers["%s%s" % (str(metadata_prefix), str(key))] = metadata[key]
-
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.post(url, None, headers=headers)
self.expected_success(202, resp.status)
return resp, body
- def list_object_metadata(self, container, object_name):
+ def list_object_metadata(self, container, object_name,
+ params=None, headers=None):
"""List all storage object X-Object-Meta- metadata."""
url = "%s/%s" % (str(container), str(object_name))
- resp, body = self.head(url)
+ if params:
+ url += '?%s' % urlparse.urlencode(params)
+ resp, body = self.head(url, headers=headers)
self.expected_success(200, resp.status)
return resp, body
- def get_object(self, container, object_name, metadata=None):
+ def get_object(self, container, object_name, metadata=None, params=None):
"""Retrieve object's data."""
headers = {}
@@ -80,45 +80,12 @@
headers[str(key)] = metadata[key]
url = "{0}/{1}".format(container, object_name)
+ if params:
+ url += '?%s' % urlparse.urlencode(params)
resp, body = self.get(url, headers=headers)
self.expected_success([200, 206], resp.status)
return resp, body
- def copy_object_in_same_container(self, container, src_object_name,
- dest_object_name, metadata=None):
- """Copy storage object's data to the new object using PUT."""
-
- url = "{0}/{1}".format(container, dest_object_name)
- headers = {}
- headers['X-Copy-From'] = "%s/%s" % (str(container),
- str(src_object_name))
- headers['content-length'] = '0'
- if metadata:
- for key in metadata:
- headers[str(key)] = metadata[key]
-
- resp, body = self.put(url, None, headers=headers)
- self.expected_success(201, resp.status)
- return resp, body
-
- def copy_object_across_containers(self, src_container, src_object_name,
- dst_container, dst_object_name,
- metadata=None):
- """Copy storage object's data to the new object using PUT."""
-
- url = "{0}/{1}".format(dst_container, dst_object_name)
- headers = {}
- headers['X-Copy-From'] = "%s/%s" % (str(src_container),
- str(src_object_name))
- headers['content-length'] = '0'
- if metadata:
- for key in metadata:
- headers[str(key)] = metadata[key]
-
- resp, body = self.put(url, None, headers=headers)
- self.expected_success(201, resp.status)
- return resp, body
-
def copy_object_2d_way(self, container, src_object_name, dest_object_name,
metadata=None):
"""Copy storage object's data to the new object using COPY."""
@@ -135,38 +102,6 @@
self.expected_success(201, resp.status)
return resp, body
- def create_object_segments(self, container, object_name, segment, data):
- """Creates object segments."""
- url = "{0}/{1}/{2}".format(container, object_name, segment)
- resp, body = self.put(url, data)
- self.expected_success(201, resp.status)
- return resp, body
-
- def put_object_with_chunk(self, container, name, contents):
- """Put an object with Transfer-Encoding header
-
- :param container: name of the container
- :type container: string
- :param name: name of the object
- :type name: string
- :param contents: object data
- :type contents: iterable
- """
- headers = {'Transfer-Encoding': 'chunked'}
- if self.token:
- headers['X-Auth-Token'] = self.token
-
- url = "%s/%s" % (container, name)
- resp, body = self.put(
- url, headers=headers,
- body=contents,
- chunked=True
- )
-
- self._error_checker(resp, body)
- self.expected_success(201, resp.status)
- return resp.status, resp.reason, resp
-
def create_object_continue(self, container, object_name,
data, metadata=None):
"""Put an object using Expect:100-continue"""
@@ -183,8 +118,7 @@
path = str(parsed.path) + "/"
path += "%s/%s" % (str(container), str(object_name))
- conn = create_connection(parsed)
-
+ conn = _create_connection(parsed)
# Send the PUT request and the headers including the "Expect" header
conn.putrequest('PUT', path)
@@ -218,7 +152,7 @@
return resp.status, resp.reason
-def create_connection(parsed_url):
+def _create_connection(parsed_url):
"""Helper function to create connection with httplib
:param parsed_url: parsed url of the remote location
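Since the prefix-expansion logic was dropped from create_or_update_object_metadata(), callers now build the metadata headers themselves. A minimal usage sketch, assuming object_client is an authenticated ObjectClient and the container and object already exist:

    # Callers pass fully qualified header names directly.
    object_client.create_or_update_object_metadata(
        'my-container', 'my-object',
        headers={'X-Object-Meta-color': 'blue'})
    # list_object_metadata() issues a HEAD and returns (resp, body);
    # the metadata comes back in the response headers.
    resp, _ = object_client.list_object_metadata('my-container', 'my-object')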
diff --git a/tempest/lib/services/volume/v1/backups_client.py b/tempest/lib/services/volume/v1/backups_client.py
index 8677913..77c40b3 100644
--- a/tempest/lib/services/volume/v1/backups_client.py
+++ b/tempest/lib/services/volume/v1/backups_client.py
@@ -102,3 +102,8 @@
except lib_exc.NotFound:
return True
return False
+
+ @property
+ def resource_type(self):
+ """Returns the primary type of resource this client works with."""
+ return 'backup'
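The new resource_type property complements is_resource_deleted(): the base RestClient's wait_for_resource_deletion() polls the latter and uses the former when reporting a timeout. A hedged sketch, assuming backups_client is an authenticated v1 BackupsClient and backup_id names an existing backup:

    backups_client.delete_backup(backup_id)
    # Polls is_resource_deleted(); a timeout error names the client's
    # resource_type ('backup').
    backups_client.wait_for_resource_deletion(backup_id)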
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
old mode 100755
new mode 100644
index 067b4e8..0fac6bd
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -49,9 +49,9 @@
def create_encryption_type(self, volume_type_id, **kwargs):
"""Create encryption type.
- TODO: Current api-site doesn't contain this API description.
- After fixing the api-site, we need to fix here also for putting
- the link to api-site.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
"""
url = "/types/%s/encryption" % volume_type_id
post_body = json.dumps({'encryption': kwargs})
diff --git a/tempest/lib/services/volume/v1/hosts_client.py b/tempest/lib/services/volume/v1/hosts_client.py
index 56ba12c..9b19b84 100644
--- a/tempest/lib/services/volume/v1/hosts_client.py
+++ b/tempest/lib/services/volume/v1/hosts_client.py
@@ -23,8 +23,12 @@
"""Client class to send CRUD Volume Host API V1 requests"""
def list_hosts(self, **params):
- """Lists all hosts."""
+ """Lists all hosts.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-all-hosts
+ """
url = 'os-hosts'
if params:
url += '?%s' % urllib.urlencode(params)
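A short usage sketch of the documented filtering, assuming hosts_client is an authenticated v1 HostsClient (the service filter value is illustrative):

    hosts = hosts_client.list_hosts(service='cinder-volume')['hosts']
    host_names = [h['host_name'] for h in hosts]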
diff --git a/tempest/lib/services/volume/v1/qos_client.py b/tempest/lib/services/volume/v1/qos_client.py
index e247b7b..593bddd 100644
--- a/tempest/lib/services/volume/v1/qos_client.py
+++ b/tempest/lib/services/volume/v1/qos_client.py
@@ -92,7 +92,9 @@
:param keys: keys to delete from the QoS specification.
- TODO(jordanP): Add a link once LP #1524877 is fixed.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
diff --git a/tempest/lib/services/volume/v1/quotas_client.py b/tempest/lib/services/volume/v1/quotas_client.py
index 678fd82..84f34f2 100644
--- a/tempest/lib/services/volume/v1/quotas_client.py
+++ b/tempest/lib/services/volume/v1/quotas_client.py
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-blockstorage-v1.html#updateQuota
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-quotas
"""
put_body = jsonutils.dumps({'quota_set': kwargs})
resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
diff --git a/tempest/lib/services/volume/v1/snapshots_client.py b/tempest/lib/services/volume/v1/snapshots_client.py
index 3433e68..51f7b9b 100644
--- a/tempest/lib/services/volume/v1/snapshots_client.py
+++ b/tempest/lib/services/volume/v1/snapshots_client.py
@@ -27,7 +27,8 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-snapshots-with-details-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
"""
url = 'snapshots'
if detail:
@@ -45,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-snapshot-details-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
"""
url = "snapshots/%s" % snapshot_id
resp, body = self.get(url)
@@ -58,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-snapshot
"""
post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
@@ -71,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#delete-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#delete-snapshot
"""
resp, body = self.delete("snapshots/%s" % snapshot_id)
self.expected_success(202, resp.status)
@@ -123,7 +124,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot
"""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
@@ -136,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-snapshot-metadata-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
"""
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.get(url)
@@ -149,7 +150,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-snapshot-metadata-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % snapshot_id
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
index 4ae9935..58a80b7 100644
--- a/tempest/lib/services/volume/v1/types_client.py
+++ b/tempest/lib/services/volume/v1/types_client.py
@@ -40,7 +40,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-volume-types-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
"""
url = 'types'
if params:
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
"""
url = "types/%s" % volume_type_id
resp, body = self.get(url)
@@ -69,7 +69,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
"""
post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
@@ -82,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#delete-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#delete-volume-type
"""
resp, body = self.delete("types/%s" % volume_type_id)
self.expected_success(202, resp.status)
@@ -137,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-volume-type
"""
put_body = json.dumps({'volume_type': kwargs})
resp, body = self.put('types/%s' % volume_type_id, put_body)
@@ -155,7 +155,7 @@
updated value.
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-extra-specs-for-a-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
"""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
put_body = json.dumps(extra_specs)
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 7a25697..0e6ea9f 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -38,6 +38,11 @@
"""List all the volumes created.
Params can be a string (must be urlencoded) or a dictionary.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
"""
url = 'volumes'
if detail:
@@ -63,7 +68,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
@@ -76,7 +81,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-volume
"""
put_body = json.dumps({'volume': kwargs})
resp, body = self.put('volumes/%s' % volume_id, put_body)
@@ -104,7 +109,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#attach-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
"""
post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -161,7 +166,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#extend-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#extend-volume-size
"""
post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -174,7 +179,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#reset-volume-status
+ https://developer.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
"""
post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
@@ -186,7 +191,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume-transfer
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
"""
post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
@@ -207,7 +212,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-volume-transfers
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
"""
url = 'os-volume-transfer'
if params:
@@ -228,7 +233,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#accept-volume-transfer
+ https://developer.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
"""
url = 'os-volume-transfer/%s/accept' % transfer_id
post_body = json.dumps({'accept': kwargs})
diff --git a/tempest/lib/services/volume/v2/backups_client.py b/tempest/lib/services/volume/v2/backups_client.py
index 2b5e82d..adfa6a6 100644
--- a/tempest/lib/services/volume/v2/backups_client.py
+++ b/tempest/lib/services/volume/v2/backups_client.py
@@ -14,12 +14,14 @@
# under the License.
from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.volume import base_client
-class BackupsClient(rest_client.RestClient):
+class BackupsClient(base_client.BaseClient):
"""Volume V2 Backups client"""
api_version = "v2"
@@ -63,11 +65,19 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
- def list_backups(self, detail=False):
- """Information for all the tenant's backups."""
+ def list_backups(self, detail=False, **params):
+ """List all the tenant's backups.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+          https://developer.openstack.org/api-ref/block-storage/v2/#list-backups
+          https://developer.openstack.org/api-ref/block-storage/v2/#list-backups-with-details
+ """
url = "backups"
if detail:
url += "/detail"
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
@@ -102,3 +112,8 @@
except lib_exc.NotFound:
return True
return False
+
+ @property
+ def resource_type(self):
+ """Returns the primary type of resource this client works with."""
+ return 'backup'
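With the new **params support, backup listings can be filtered and paginated server-side. A hedged sketch, assuming backups_client is an authenticated v2 BackupsClient:

    backups = backups_client.list_backups(
        detail=True, status='available', limit=10)['backups']
    available_ids = [b['id'] for b in backups]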
diff --git a/tempest/lib/services/volume/v2/encryption_types_client.py b/tempest/lib/services/volume/v2/encryption_types_client.py
old mode 100755
new mode 100644
diff --git a/tempest/lib/services/volume/v2/snapshots_client.py b/tempest/lib/services/volume/v2/snapshots_client.py
index 5f4e7de..4bc2842 100644
--- a/tempest/lib/services/volume/v2/snapshots_client.py
+++ b/tempest/lib/services/volume/v2/snapshots_client.py
@@ -164,6 +164,14 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def show_snapshot_metadata_item(self, snapshot_id, id):
+ """Show metadata item for the snapshot."""
+ url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
"""Update metadata item for the snapshot."""
# TODO(piyush): Current api-site doesn't contain this API description.
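A hedged usage sketch for the new accessor, assuming snapshots_client is a v2 SnapshotsClient and the snapshot carries a metadata key named 'purpose' (a hypothetical key used only for illustration):

    item = snapshots_client.show_snapshot_metadata_item(
        snapshot_id, 'purpose')['meta']
    # item is a single-entry dict, e.g. {'purpose': 'nightly'}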
diff --git a/tempest/lib/services/volume/v2/volumes_client.py b/tempest/lib/services/volume/v2/volumes_client.py
index cfff16a..da3f2b5 100644
--- a/tempest/lib/services/volume/v2/volumes_client.py
+++ b/tempest/lib/services/volume/v2/volumes_client.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from debtcollector import moves
-from debtcollector import removals
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
@@ -22,43 +20,12 @@
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
-from tempest.lib.services.volume.v2 import transfers_client
class VolumesClient(base_client.BaseClient):
"""Client class to send CRUD Volume V2 API requests"""
api_version = "v2"
- create_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.create_volume_transfer,
- 'VolumesClient.create_volume_transfer', __name__,
- message='Use create_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- show_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.show_volume_transfer,
- 'VolumesClient.show_volume_transfer', __name__,
- message='Use show_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- list_volume_transfers = moves.moved_function(
- transfers_client.TransfersClient.list_volume_transfers,
- 'VolumesClient.list_volume_transfers', __name__,
- message='Use list_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- delete_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.delete_volume_transfer,
- 'VolumesClient.delete_volume_transfer', __name__,
- message='Use delete_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- accept_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.accept_volume_transfer,
- 'VolumesClient.accept_volume_transfer', __name__,
- message='Use accept_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
def _prepare_params(self, params):
"""Prepares params for use in get or _ext_get methods.
@@ -197,10 +164,18 @@
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
+        """Check whether the specified resource has been deleted.
+
+        :param id: The id of the resource to check
+        :raises lib_exc.DeleteErrorException: If the specified resource is
+            in the error_deleting status, i.e. the delete failed.
+ """
try:
- self.show_volume(id)
+ volume = self.show_volume(id)
except lib_exc.NotFound:
return True
+ if volume["volume"]["status"] == "error_deleting":
+ raise lib_exc.DeleteErrorException(resource_id=id)
return False
@property
@@ -284,6 +259,14 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def show_volume_metadata_item(self, volume_id, id):
+ """Show metadata item for the volume."""
+ url = "volumes/%s/metadata/%s" % (volume_id, id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def update_volume_metadata_item(self, volume_id, id, meta_item):
"""Update metadata item for the volume."""
put_body = json.dumps({'meta': meta_item})
@@ -310,6 +293,7 @@
post_body = json.dumps({'os-retype': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
def force_detach_volume(self, volume_id, **kwargs):
"""Force detach a volume.
@@ -346,30 +330,11 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
- @removals.remove(message="use list_pools from tempest.lib.services."
- "volume.v2.scheduler_stats_client")
- def show_pools(self, detail=False):
- # List all the volumes pools (hosts)
- url = 'scheduler-stats/get_pools'
- if detail:
- url += '?detail=True'
-
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- @removals.remove(message="use show_backend_capabilities from tempest.lib."
- "services.volume.v2.capabilities_client")
- def show_backend_capabilities(self, host):
- """Shows capabilities for a storage back end.
-
- For a full list of available parameters, please refer to the official
- API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#show-back-end-capabilities
- """
- url = 'capabilities/%s' % host
- resp, body = self.get(url)
+ def show_volume_image_metadata(self, volume_id):
+ """Show image metadata for the volume."""
+ post_body = json.dumps({'os-show_image_metadata': {}})
+ url = "volumes/%s/action" % volume_id
+ resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
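The volume client changes above tighten the deletion check and add two read helpers. A hedged sketch, assuming volumes_client is a v2 VolumesClient; volume_id and image_volume_id are ids of existing volumes, the latter created from a Glance image:

    # wait_for_resource_deletion() now surfaces DeleteErrorException if the
    # volume lands in 'error_deleting' instead of waiting until timeout.
    volumes_client.delete_volume(volume_id)
    volumes_client.wait_for_resource_deletion(volume_id)

    # show_volume_image_metadata() wraps the os-show_image_metadata action.
    image_meta = volumes_client.show_volume_image_metadata(
        image_volume_id)['metadata']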
diff --git a/tempest/lib/services/volume/v3/__init__.py b/tempest/lib/services/volume/v3/__init__.py
index a351d61..2d85553 100644
--- a/tempest/lib/services/volume/v3/__init__.py
+++ b/tempest/lib/services/volume/v3/__init__.py
@@ -12,12 +12,18 @@
# License for the specific language governing permissions and limitations under
# the License.
+from tempest.lib.services.volume.v3.backups_client import BackupsClient
from tempest.lib.services.volume.v3.base_client import BaseClient
+from tempest.lib.services.volume.v3.group_snapshots_client import \
+ GroupSnapshotsClient
from tempest.lib.services.volume.v3.group_types_client import GroupTypesClient
from tempest.lib.services.volume.v3.groups_client import GroupsClient
from tempest.lib.services.volume.v3.messages_client import MessagesClient
+from tempest.lib.services.volume.v3.snapshots_client import SnapshotsClient
from tempest.lib.services.volume.v3.versions_client import VersionsClient
from tempest.lib.services.volume.v3.volumes_client import VolumesClient
-__all__ = ['BaseClient', 'GroupsClient', 'GroupTypesClient',
- 'MessagesClient', 'VersionsClient', 'VolumesClient']
+__all__ = ['BackupsClient', 'BaseClient', 'GroupsClient',
+ 'GroupSnapshotsClient', 'GroupTypesClient',
+ 'MessagesClient', 'SnapshotsClient', 'VersionsClient',
+ 'VolumesClient']
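With the expanded __all__, the new v3 clients are importable straight from the package namespace:

    from tempest.lib.services.volume.v3 import BackupsClient
    from tempest.lib.services.volume.v3 import GroupSnapshotsClient
    from tempest.lib.services.volume.v3 import SnapshotsClient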
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
new file mode 100644
index 0000000..e742e39
--- /dev/null
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -0,0 +1,37 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.volume.v2 import backups_client
+
+
+class BackupsClient(backups_client.BackupsClient):
+ """Volume V3 Backups client"""
+ api_version = "v3"
+
+ def update_backup(self, backup_id, **kwargs):
+ """Updates the specified volume backup.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-a-backup
+ """
+ put_body = json.dumps({'backup': kwargs})
+ resp, body = self.put('backups/%s' % backup_id, put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
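A hedged usage sketch, assuming backups_client is a v3 BackupsClient and the deployment exposes a microversion recent enough to allow backup updates (3.9 or later, per the API reference):

    updated = backups_client.update_backup(
        backup_id, name='weekly-backup',
        description='rotated copy')['backup']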
diff --git a/tempest/lib/services/volume/v3/group_snapshots_client.py b/tempest/lib/services/volume/v3/group_snapshots_client.py
new file mode 100644
index 0000000..6e53e3e
--- /dev/null
+++ b/tempest/lib/services/volume/v3/group_snapshots_client.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.volume import base_client
+
+
+class GroupSnapshotsClient(base_client.BaseClient):
+ """Client class to send CRUD Volume Group Snapshot API requests"""
+ api_version = 'v3'
+
+ def create_group_snapshot(self, **kwargs):
+ """Creates a group snapshot.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#create-group-snapshot
+ """
+ post_body = json.dumps({'group_snapshot': kwargs})
+ resp, body = self.post('group_snapshots', post_body)
+ body = json.loads(body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_group_snapshot(self, group_snapshot_id):
+ """Deletes a group snapshot.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#delete-group-snapshot
+ """
+ resp, body = self.delete('group_snapshots/%s' % group_snapshot_id)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_group_snapshot(self, group_snapshot_id):
+ """Returns the details of a single group snapshot.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#show-group-snapshot-details
+ """
+ url = "group_snapshots/%s" % str(group_snapshot_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_group_snapshots(self, detail=False, **params):
+        """Lists all the tenant's group snapshots.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-group-snapshots
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-group-snapshots-with-details
+ """
+ url = "group_snapshots"
+ if detail:
+ url += "/detail"
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def reset_group_snapshot_status(self, group_snapshot_id, status_to_set):
+ """Resets group snapshot status.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#reset-group-snapshot-status
+ """
+ post_body = json.dumps({'reset_status': {'status': status_to_set}})
+ resp, body = self.post('group_snapshots/%s/action' % group_snapshot_id,
+ post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.show_group_snapshot(id)
+ except lib_exc.NotFound:
+ return True
+ return False
+
+ @property
+ def resource_type(self):
+ """Returns the primary type of resource this client works with."""
+ return 'group-snapshot'
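A hedged lifecycle sketch for the new client, assuming gs_client is an authenticated GroupSnapshotsClient and group_id names an existing group; waiting for the snapshot to reach 'available' is left to the caller's own waiter:

    snap = gs_client.create_group_snapshot(
        group_id=group_id, name='gs-1')['group_snapshot']
    # ... wait for 'available' ...
    gs_client.delete_group_snapshot(snap['id'])
    gs_client.wait_for_resource_deletion(snap['id'])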
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index a6edbf5..97bac48 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -14,6 +14,7 @@
# under the License.
from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib.services.volume import base_client
@@ -46,3 +47,31 @@
resp, body = self.delete("group_types/%s" % group_type_id)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def list_group_types(self, **params):
+ """List all the group_types created.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-group-types
+ """
+ url = 'group_types'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_group_type(self, group_type_id):
+ """Returns the details of a single group_type.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#show-group-type-details
+ """
+ url = "group_types/%s" % group_type_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
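A hedged sketch of the two new read calls, assuming group_types_client is an authenticated GroupTypesClient:

    group_types = group_types_client.list_group_types()['group_types']
    if group_types:
        details = group_types_client.show_group_type(
            group_types[0]['id'])['group_type']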
diff --git a/tempest/lib/services/volume/v3/groups_client.py b/tempest/lib/services/volume/v3/groups_client.py
index 9b53bb7..e2e477d 100644
--- a/tempest/lib/services/volume/v3/groups_client.py
+++ b/tempest/lib/services/volume/v3/groups_client.py
@@ -84,6 +84,42 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def create_group_from_source(self, **kwargs):
+ """Creates a group from source.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#create-group-from-source
+ """
+ post_body = json.dumps({'create-from-src': kwargs})
+ resp, body = self.post('groups/action', post_body)
+ body = json.loads(body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_group(self, group_id, **kwargs):
+ """Updates the specified group.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-group
+ """
+ put_body = json.dumps({'group': kwargs})
+ resp, body = self.put('groups/%s' % group_id, put_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def reset_group_status(self, group_id, status_to_set):
+ """Resets group status.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#reset-group-status
+ """
+ post_body = json.dumps({'reset_status': {'status': status_to_set}})
+ resp, body = self.post('groups/%s/action' % group_id, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def is_resource_deleted(self, id):
try:
self.show_group(id)
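A hedged sketch of the new mutating calls, assuming groups_client is a v3 GroupsClient and group_id / group_snapshot_id name existing resources (the 'group' response key for create-from-source is an assumption modeled on the regular create-group response):

    groups_client.update_group(group_id, name='renamed-group')
    restored = groups_client.create_group_from_source(
        group_snapshot_id=group_snapshot_id,
        name='restored-group')['group']
    groups_client.reset_group_status(group_id, 'available')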
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
new file mode 100644
index 0000000..88c094f
--- /dev/null
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v2 import snapshots_client
+
+
+class SnapshotsClient(snapshots_client.SnapshotsClient):
+ """Client class to send CRUD Volume Snapshot V3 API requests."""
+ api_version = "v3"
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index 38e0de9..ad300c2 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -14,16 +14,16 @@
Any scenario test should have a real-life use case. An example would be:
- - "As operator I want to start with a blank environment":
- 1. upload a glance image
- 2. deploy a vm from it
- 3. ssh to the guest
- 4. create a snapshot of the vm
+- "As operator I want to start with a blank environment":
+ 1. upload a glance image
+ 2. deploy a vm from it
+ 3. ssh to the guest
+ 4. create a snapshot of the vm
-Why are these tests in tempest?
+Why are these tests in Tempest?
-------------------------------
-This is one of tempests core purposes, testing the integration between
+This is one of Tempest's core purposes, testing the integration between
projects.
@@ -43,7 +43,7 @@
specific in your interactions. A giant "this is my data center" smoke
test is hard to debug when it goes wrong.
-A flow of interactions between glance and nova, like in the
+A flow of interactions between Glance and Nova, like in the
introduction, is a good example. Especially if it involves a repeated
interaction when a resource is setup, modified, detached, and then
reused later again.
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 38e03c7..06b4b59 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -79,24 +79,24 @@
cls.security_groups_client = cls.os_primary.security_groups_client
cls.security_group_rules_client = (
cls.os_primary.security_group_rules_client)
- cls.volumes_client = cls.os_primary.volumes_v2_client
- cls.snapshots_client = cls.os_primary.snapshots_v2_client
+ # Use the latest available volume clients
+ if CONF.service_available.cinder:
+ cls.volumes_client = cls.os_primary.volumes_client_latest
+ cls.snapshots_client = cls.os_primary.snapshots_client_latest
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
- def _create_port(self, network_id, client=None, namestart='port-quotatest',
- **kwargs):
+ def create_port(self, network_id, client=None, **kwargs):
if not client:
client = self.ports_client
- name = data_utils.rand_name(namestart)
+ name = data_utils.rand_name(self.__class__.__name__)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
- self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
@@ -145,8 +145,7 @@
if vnic_type:
ports = []
- create_port_body = {'binding:vnic_type': vnic_type,
- 'namestart': 'port-smoke'}
+ create_port_body = {'binding:vnic_type': vnic_type}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
@@ -183,9 +182,9 @@
for net in networks:
net_id = net.get('uuid', net.get('id'))
if 'port' not in net:
- port = self._create_port(network_id=net_id,
- client=clients.ports_client,
- **create_port_body)
+ port = self.create_port(network_id=net_id,
+ client=clients.ports_client,
+ **create_port_body)
ports.append({'port': port['id']})
else:
ports.append({'port': net['port']})
@@ -238,9 +237,26 @@
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
+ def create_volume_snapshot(self, volume_id, name=None, description=None,
+ metadata=None, force=False):
+ name = name or data_utils.rand_name(
+ self.__class__.__name__ + '-snapshot')
+ snapshot = self.snapshots_client.create_snapshot(
+ volume_id=volume_id,
+ force=force,
+ display_name=name,
+ description=description,
+ metadata=metadata)['snapshot']
+ self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
+ snapshot['id'])
+ self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
+ return snapshot
+
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
- client = self.admin_volume_types_client
+ client = self.os_admin.volume_types_v2_client
if not name:
class_name = self.__class__.__name__
name = data_utils.rand_name(class_name + '-volume-type')
@@ -252,10 +268,8 @@
if backend_name:
extra_specs = {"volume_backend_name": backend_name}
- body = client.create_volume_type(name=randomized_name,
- extra_specs=extra_specs)
- volume_type = body['volume_type']
- self.assertIn('id', volume_type)
+ volume_type = client.create_volume_type(
+ name=randomized_name, extra_specs=extra_specs)['volume_type']
self.addCleanup(client.delete_volume_type, volume_type['id'])
return volume_type
@@ -487,27 +501,6 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
- volume = self.volumes_client.show_volume(volume['id'])['volume']
- self.assertEqual('available', volume['status'])
-
- def rebuild_server(self, server_id, image=None,
- preserve_ephemeral=False, wait=True,
- rebuild_kwargs=None):
- if image is None:
- image = CONF.compute.image_ref
-
- rebuild_kwargs = rebuild_kwargs or {}
-
- LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
- server_id, image, preserve_ephemeral)
- self.servers_client.rebuild_server(
- server_id=server_id, image_ref=image,
- preserve_ephemeral=preserve_ephemeral,
- **rebuild_kwargs)
- if wait:
- waiters.wait_for_server_status(self.servers_client,
- server_id, 'ACTIVE')
-
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
@@ -656,9 +649,7 @@
addresses = server['addresses'][
CONF.validation.network_for_ssh]
else:
- creds_provider = self._get_credentials_provider()
- net_creds = creds_provider.get_primary_creds()
- network = getattr(net_creds, 'network', None)
+ network = self.get_tenant_network()
addresses = (server['addresses'][network['name']]
if network else [])
for address in addresses:
@@ -713,17 +704,14 @@
network['id'])
return network
- def _create_subnet(self, network, subnets_client=None,
- routers_client=None, namestart='subnet-smoke',
- **kwargs):
+ def create_subnet(self, network, subnets_client=None,
+ namestart='subnet-smoke', **kwargs):
"""Create a subnet for the given network
within the cidr block configured for tenant networks.
"""
if not subnets_client:
subnets_client = self.subnets_client
- if not routers_client:
- routers_client = self.routers_client
def cidr_in_use(cidr, tenant_id):
"""Check cidr existence
@@ -839,22 +827,6 @@
floating_ip['id'])
return floating_ip
- def _associate_floating_ip(self, floating_ip, server):
- port_id, _ = self._get_server_port_id_and_ip4(server)
- kwargs = dict(port_id=port_id)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertEqual(port_id, floating_ip['port_id'])
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """:param floating_ip: floating_ips_client.create_floatingip"""
- kwargs = dict(port_id=None)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertIsNone(floating_ip['port_id'])
- return floating_ip
-
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
@@ -882,11 +854,11 @@
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
+ def check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True,
+ servers_for_debug=None):
if not CONF.network.project_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
@@ -906,16 +878,13 @@
self._log_net_info(e)
raise
- def _check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
+ def check_remote_connectivity(self, source, dest, should_succeed=True,
+ nic=None):
"""assert ping server via source ssh connection
- Note: This is an internal method. Use check_remote_connectivity
- instead.
-
:param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
+ :param dest: an IP to ping against
+ :param should_succeed: boolean: should ping succeed or not
:param nic: specific network interface to ping from
"""
def ping_remote():
@@ -927,28 +896,19 @@
return not should_succeed
return should_succeed
- return test_utils.call_until_true(ping_remote,
- CONF.validation.ping_timeout,
- 1)
+ result = test_utils.call_until_true(ping_remote,
+ CONF.validation.ping_timeout, 1)
+ if result:
+ return
- def check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """assert ping server via source ssh connection
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :param nic: specific network interface to ping from
- """
- result = self._check_remote_connectivity(source, dest, should_succeed,
- nic)
source_host = source.ssh_client.host
if should_succeed:
msg = "Timed out waiting for %s to become reachable from %s" \
% (dest, source_host)
else:
msg = "%s is reachable from %s" % (dest, source_host)
- self.assertTrue(result, msg)
+ self._log_console_output()
+ self.fail(msg)
def _create_security_group(self, security_group_rules_client=None,
tenant_id=None,
@@ -1005,23 +965,6 @@
client.delete_security_group, secgroup['id'])
return secgroup
- def _default_security_group(self, client=None, tenant_id=None):
- """Get default secgroup for given tenant_id.
-
- :returns: default secgroup for given tenant
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sgs = [
- sg for sg in list(client.list_security_groups().values())[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertNotEmpty(sgs, msg)
- return sgs[0]
-
def _create_security_group_rule(self, secgroup=None,
sec_group_rules_client=None,
tenant_id=None,
@@ -1050,8 +993,12 @@
if not tenant_id:
tenant_id = security_groups_client.tenant_id
if secgroup is None:
- secgroup = self._default_security_group(
- client=security_groups_client, tenant_id=tenant_id)
+ # Get default secgroup for tenant_id
+ default_secgroups = security_groups_client.list_security_groups(
+ name='default', tenant_id=tenant_id)['security_groups']
+ msg = "No default security group for tenant %s." % (tenant_id)
+ self.assertNotEmpty(default_secgroups, msg)
+ secgroup = default_secgroups[0]
ruleset = dict(security_group_id=secgroup['id'],
tenant_id=secgroup['tenant_id'])
@@ -1139,37 +1086,18 @@
body = client.show_router(router_id)
return body['router']
elif network_id:
- router = self._create_router(client, tenant_id)
- kwargs = {'external_gateway_info': dict(network_id=network_id)}
- router = client.update_router(router['id'], **kwargs)['router']
+ router = client.create_router(
+ name=data_utils.rand_name(self.__class__.__name__ + '-router'),
+ admin_state_up=True,
+ tenant_id=tenant_id,
+ external_gateway_info=dict(network_id=network_id))['router']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_router, router['id'])
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
- def _create_router(self, client=None, tenant_id=None,
- namestart='router-smoke'):
- if not client:
- client = self.routers_client
- if not tenant_id:
- tenant_id = client.tenant_id
- name = data_utils.rand_name(namestart)
- result = client.create_router(name=name,
- admin_state_up=True,
- tenant_id=tenant_id)
- router = result['router']
- self.assertEqual(router['name'], name)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_router,
- router['id'])
- return router
-
- def _update_router_admin_state(self, router, admin_state_up):
- kwargs = dict(admin_state_up=admin_state_up)
- router = self.routers_client.update_router(
- router['id'], **kwargs)['router']
- self.assertEqual(admin_state_up, router['admin_state_up'])
-
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
@@ -1204,12 +1132,11 @@
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
- subnets_client=subnets_client,
- routers_client=routers_client)
+ subnets_client=subnets_client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
- subnet = self._create_subnet(**subnet_kwargs)
+ subnet = self.create_subnet(**subnet_kwargs)
if not routers_client:
routers_client = self.routers_client
router_id = router['id']
@@ -1249,6 +1176,17 @@
type_id, provider=provider, key_size=key_size, cipher=cipher,
control_location=control_location)['encryption']
+ def create_encrypted_volume(self, encryption_provider, volume_type,
+ key_size=256, cipher='aes-xts-plain64',
+ control_location='front-end'):
+ volume_type = self.create_volume_type(name=volume_type)
+ self.create_encryption_type(type_id=volume_type['id'],
+ provider=encryption_provider,
+ key_size=key_size,
+ cipher=cipher,
+ control_location=control_location)
+ return self.create_volume(volume_type=volume_type['name'])
+
class ObjectStorageScenarioTest(ScenarioTest):
"""Provide harness to do Object Storage scenario tests.
@@ -1288,7 +1226,7 @@
def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
- self.container_client.create_container(name)
+ self.container_client.update_container(name)
# look for the container to assure it is created
self.list_and_check_container_objects(name)
LOG.debug('Container %s created', name)
@@ -1325,7 +1263,7 @@
present_obj = []
if not_present_obj is None:
not_present_obj = []
- _, object_list = self.container_client.list_container_contents(
+ _, object_list = self.container_client.list_container_objects(
container_name)
if present_obj:
for obj in present_obj:
@@ -1334,14 +1272,6 @@
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
- def change_container_acl(self, container_name, acl):
- metadata_param = {'metadata_prefix': 'x-container-',
- 'metadata': {'read': acl}}
- self.container_client.update_container_metadata(container_name,
- **metadata_param)
- resp, _ = self.container_client.list_container_metadata(container_name)
- self.assertEqual(resp['x-container-read'], acl)
-
def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
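The scenario manager changes above promote several helpers to public names and make the connectivity check fail fast. A hedged sketch of a test using them, assuming a ScenarioTest subclass in which network, volume and ssh_client are already set up:

    port = self.create_port(network_id=network['id'])
    subnet = self.create_subnet(network)
    snapshot = self.create_volume_snapshot(volume['id'])
    # check_remote_connectivity() now fails the test itself (and logs the
    # console output) instead of returning a boolean to assert on.
    self.check_remote_connectivity(ssh_client, dest_ip, should_succeed=True)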
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 25227be..9ff6227 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -14,10 +14,10 @@
# under the License.
from tempest.common import tempest_fixtures as fixtures
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
class TestAggregatesBasicOps(manager.ScenarioTest):
@@ -97,7 +97,7 @@
@decorators.idempotent_id('cb2b4c4f-0c7c-4164-bdde-6285b302a081')
@decorators.attr(type='slow')
- @test.services('compute')
+ @utils.services('compute')
def test_aggregate_basic_ops(self):
self.useFixture(fixtures.LockFixture('availability_zone'))
az = 'foo_zone'
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index d7b86f6..b5220e9 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -48,22 +48,13 @@
return self.create_server(image_id=image, key_name=keypair['name'])
- def create_encrypted_volume(self, encryption_provider, volume_type):
- volume_type = self.create_volume_type(name=volume_type)
- self.create_encryption_type(type_id=volume_type['id'],
- provider=encryption_provider,
- key_size=256,
- cipher='aes-xts-plain64',
- control_location='front-end')
- return self.create_volume(volume_type=volume_type['name'])
-
def attach_detach_volume(self, server, volume):
attached_volume = self.nova_volume_attach(server, volume)
self.nova_volume_detach(server, attached_volume)
@decorators.idempotent_id('79165fb4-5534-4b9d-8429-97ccffb8f86e')
@decorators.attr(type='slow')
- @test.services('compute', 'volume', 'image')
+ @utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
server = self.launch_instance()
volume = self.create_encrypted_volume('nova.volume.encryptors.'
@@ -73,7 +64,7 @@
@decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
@decorators.attr(type='slow')
- @test.services('compute', 'volume', 'image')
+ @utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
server = self.launch_instance()
volume = self.create_encrypted_volume('nova.volume.encryptors.'
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 26a834b..29f1743 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -16,13 +16,13 @@
import testtools
from tempest.common import custom_matchers
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -105,7 +105,7 @@
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
'Floating ips are not available')
- @test.services('compute', 'volume', 'image', 'network')
+ @utils.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
image = self.glance_image_create()
keypair = self.create_keypair()
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index c8add8b..7c404ad 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,11 +15,11 @@
import testtools
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -59,7 +59,7 @@
def _setup_server(self, keypair):
security_groups = []
- if test.is_extension_enabled('security-group', 'network'):
+ if utils.is_extension_enabled('security-group', 'network'):
security_group = self._create_security_group()
security_groups = [{'name': security_group['name']}]
network, _, _ = self.create_networks()
@@ -83,7 +83,7 @@
should_connect=True):
username = CONF.validation.image_ssh_user
private_key = keypair['private_key']
- self._check_tenant_network_connectivity(
+ self.check_tenant_network_connectivity(
server, username, private_key,
should_connect=should_connect,
servers_for_debug=[server])
@@ -107,7 +107,7 @@
@decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_stop_start(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -122,7 +122,7 @@
server, keypair, floating_ip)
@decorators.idempotent_id('7b6860c2-afa3-4846-9522-adeb38dfbe08')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_reboot(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -133,7 +133,7 @@
@decorators.idempotent_id('88a529c2-1daa-4c85-9aec-d541ba3eb699')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_rebuild(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -148,7 +148,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_pause_unpause(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -166,7 +166,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_suspend_resume(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -184,7 +184,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize is not available.')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_resize(self):
resize_flavor = CONF.compute.flavor_ref_alt
keypair = self.create_keypair()
@@ -205,7 +205,7 @@
'Less than 2 compute nodes, skipping multinode '
'tests.')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_cold_migration(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
@@ -231,7 +231,7 @@
'Less than 2 compute nodes, skipping multinode '
'tests.')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_connectivity_cold_migration_revert(self):
keypair = self.create_keypair()
server = self._setup_server(keypair)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 48ddac6..ff8837f 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -19,13 +19,13 @@
from oslo_log import log as logging
import testtools
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -87,7 +87,7 @@
'public_network_id must be defined.')
raise cls.skipException(msg)
for ext in ['router', 'security-group']:
- if not test.is_extension_enabled(ext, 'network'):
+ if not utils.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
raise cls.skipException(msg)
if not CONF.network_feature_enabled.floating_ips:
@@ -113,11 +113,16 @@
port_id = None
if boot_with_port:
# create a port on the network and boot with that
- port_id = self._create_port(self.network['id'])['id']
+ port_id = self.create_port(self.network['id'])['id']
self.ports.append({'port': port_id})
server = self._create_server(self.network, port_id)
- self._check_tenant_network_connectivity()
+ ssh_login = CONF.validation.image_ssh_user
+ for server in self.servers:
+ # call the common method in the parent class
+ self.check_tenant_network_connectivity(
+ server, ssh_login, self._get_server_key(server),
+ servers_for_debug=self.servers)
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
@@ -170,15 +175,6 @@
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
- def _check_tenant_network_connectivity(self):
- ssh_login = CONF.validation.image_ssh_user
- for server in self.servers:
- # call the common method in the parent class
- super(TestNetworkBasicOps, self).\
- _check_tenant_network_connectivity(
- server, ssh_login, self._get_server_key(server),
- servers_for_debug=self.servers)
-
def check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True, mtu=None):
@@ -213,25 +209,28 @@
def _disassociate_floating_ips(self):
floating_ip, _ = self.floating_ip_tuple
- self._disassociate_floating_ip(floating_ip)
- self.floating_ip_tuple = Floating_IP_tuple(
- floating_ip, None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], port_id=None)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ self.floating_ip_tuple = Floating_IP_tuple(floating_ip, None)
def _reassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
# create a new server for the floating ip
server = self._create_server(self.network)
- self._associate_floating_ip(floating_ip, server)
- self.floating_ip_tuple = Floating_IP_tuple(
- floating_ip, server)
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], port_id=port_id)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def _create_new_network(self, create_gateway=False):
self.new_net = self._create_network()
if create_gateway:
- self.new_subnet = self._create_subnet(
+ self.new_subnet = self.create_subnet(
network=self.new_net)
else:
- self.new_subnet = self._create_subnet(
+ self.new_subnet = self.create_subnet(
network=self.new_net,
gateway_ip=None)
@@ -355,9 +354,15 @@
self.check_remote_connectivity(ssh_source, remote_ip,
should_connect)
+ def _update_router_admin_state(self, router, admin_state_up):
+ kwargs = dict(admin_state_up=admin_state_up)
+ router = self.routers_client.update_router(
+ router['id'], **kwargs)['router']
+ self.assertEqual(admin_state_up, router['admin_state_up'])
+
@decorators.attr(type='smoke')
@decorators.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_network_basic_ops(self):
"""Basic network operation test
@@ -409,13 +414,17 @@
"floating ip")
@decorators.idempotent_id('b158ea55-472e-4086-8fa9-c64ac0c6c1d0')
- @testtools.skipUnless(test.is_extension_enabled('net-mtu', 'network'),
+ @testtools.skipUnless(utils.is_extension_enabled('net-mtu', 'network'),
'No way to calculate MTU for networks')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_mtu_sized_frames(self):
"""Validate that network MTU sized frames fit through."""
self._setup_network_and_servers()
+ # first check that connectivity works in general for the instance
+ self.check_public_network_connectivity(should_connect=True)
+ # now that we checked general connectivity, test that full size frames
+ # can also pass between nodes
self.check_public_network_connectivity(
should_connect=True, mtu=self.network['mtu'])
@@ -425,7 +434,7 @@
'multitenant network environment')
@decorators.skip_because(bug="1610994")
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_connectivity_between_vms_on_different_networks(self):
"""Test connectivity between VMs on different networks
@@ -479,7 +488,7 @@
@testtools.skipIf(CONF.network.port_vnic_type in ['direct', 'macvtap'],
'NIC hotplug not supported for '
'vnic_type direct or macvtap')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_hotplug_nic(self):
"""Test hotplug network interface
@@ -501,7 +510,7 @@
'Router state can be altered only with multitenant '
'networks capabilities')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_update_router_admin_state(self):
"""Test to update admin state up of router
@@ -535,7 +544,7 @@
@testtools.skipUnless(CONF.scenario.dhcp_client,
"DHCP client is not available.")
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_subnet_details(self):
"""Tests that subnet's extra configuration details are affecting VMs.
@@ -619,7 +628,7 @@
"Changing a port's admin state is not supported "
"by the test environment")
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_update_instance_port_admin_state(self):
"""Test to update admin_state_up attribute of instance port
@@ -666,7 +675,7 @@
@decorators.idempotent_id('759462e1-8535-46b0-ab3a-33aa45c55aaa')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_preserve_preexisting_port(self):
"""Test preserve pre-existing port
@@ -715,10 +724,10 @@
'server %s.' % server['id'])
self.assertEqual(port['id'], port_list[0]['id'])
- @test.requires_ext(service='network', extension='l3_agent_scheduler')
+ @utils.requires_ext(service='network', extension='l3_agent_scheduler')
@decorators.idempotent_id('2e788c46-fb3f-4ac9-8f82-0561555bea73')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_router_rescheduling(self):
"""Tests that router can be removed from agent and add to a new agent.
@@ -793,12 +802,12 @@
should_connect=True,
msg='After router rescheduling')
- @test.requires_ext(service='network', extension='port-security')
+ @utils.requires_ext(service='network', extension='port-security')
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
'NIC hotplug not available')
@decorators.idempotent_id('7c0bb1a2-d053-49a4-98f9-ca1a1d849f63')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_port_security_macspoofing_port(self):
"""Tests port_security extension enforces mac spoofing
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index bf26c2e..9f4e62b 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -12,13 +12,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import functools
-
+from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -78,9 +76,9 @@
if dualnet:
network_v6 = self._create_network()
- sub4 = self._create_subnet(network=network,
- namestart='sub4',
- ip_version=4)
+ sub4 = self.create_subnet(network=network,
+ namestart='sub4',
+ ip_version=4)
router = self._get_router()
self.routers_client.add_router_interface(router['id'],
@@ -93,11 +91,11 @@
self.subnets_v6 = []
for _ in range(n_subnets6):
net6 = network_v6 if dualnet else network
- sub6 = self._create_subnet(network=net6,
- namestart='sub6',
- ip_version=6,
- ipv6_ra_mode=address6_mode,
- ipv6_address_mode=address6_mode)
+ sub6 = self.create_subnet(network=net6,
+ namestart='sub6',
+ ip_version=6,
+ ipv6_ra_mode=address6_mode,
+ ipv6_address_mode=address6_mode)
self.routers_client.add_router_interface(router['id'],
subnet_id=sub6['id'])
@@ -132,7 +130,7 @@
ssh = self.get_remote_client(
ip_address=fip['floating_ip_address'],
username=username, server=srv)
- return ssh, ips, srv["id"]
+ return ssh, ips, srv
def turn_nic6_on(self, ssh, sid, network_id):
"""Turns the IPv6 vNIC on
@@ -163,8 +161,8 @@
n_subnets6=n_subnets6,
dualnet=dualnet)
- sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
- sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)
+ sshv4_1, ips_from_api_1, srv1 = self.prepare_server(networks=net_list)
+ sshv4_2, ips_from_api_2, srv2 = self.prepare_server(networks=net_list)
def guest_has_address(ssh, addr):
return addr in ssh.exec_command("ip address")
@@ -172,8 +170,8 @@
# Turn on 2nd NIC for Cirros when dualnet
if dualnet:
_, network_v6 = net_list
- self.turn_nic6_on(sshv4_1, sid1, network_v6['id'])
- self.turn_nic6_on(sshv4_2, sid2, network_v6['id'])
+ self.turn_nic6_on(sshv4_1, srv1['id'], network_v6['id'])
+ self.turn_nic6_on(sshv4_2, srv2['id'], network_v6['id'])
# get addresses assigned to vNIC as reported by 'ip address' utility
ips_from_ip_1 = sshv4_1.exec_command("ip address")
@@ -183,17 +181,19 @@
for i in range(n_subnets6):
# v6 should be configured since the image supports it
# It can take time for ipv6 automatic address to get assigned
- srv1_v6_addr_assigned = functools.partial(
- guest_has_address, sshv4_1, ips_from_api_1['6'][i])
-
- srv2_v6_addr_assigned = functools.partial(
- guest_has_address, sshv4_2, ips_from_api_2['6'][i])
-
- self.assertTrue(test_utils.call_until_true(srv1_v6_addr_assigned,
- CONF.validation.ping_timeout, 1))
-
- self.assertTrue(test_utils.call_until_true(srv2_v6_addr_assigned,
- CONF.validation.ping_timeout, 1))
+ for srv, ssh, ips in (
+ (srv1, sshv4_1, ips_from_api_1),
+ (srv2, sshv4_2, ips_from_api_2)):
+ ip = ips['6'][i]
+ result = test_utils.call_until_true(
+ guest_has_address,
+ CONF.validation.ping_timeout, 1, ssh, ip)
+ if not result:
+ self._log_console_output(servers=[srv])
+ self.fail(
+ 'Address %s not configured for instance %s, '
+ 'ip address output is\n%s' %
+ (ip, srv['id'], ssh.exec_command("ip address")))
self.check_remote_connectivity(sshv4_1, ips_from_api_2['4'])
self.check_remote_connectivity(sshv4_2, ips_from_api_1['4'])
@@ -210,49 +210,49 @@
@decorators.attr(type='slow')
@decorators.idempotent_id('2c92df61-29f0-4eaa-bee3-7c65bef62a43')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_slaac_from_os(self):
self._prepare_and_test(address6_mode='slaac')
@decorators.attr(type='slow')
@decorators.idempotent_id('d7e1f858-187c-45a6-89c9-bdafde619a9f')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_dhcp6_stateless_from_os(self):
self._prepare_and_test(address6_mode='dhcpv6-stateless')
@decorators.attr(type='slow')
@decorators.idempotent_id('7ab23f41-833b-4a16-a7c9-5b42fe6d4123')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_multi_prefix_dhcpv6_stateless(self):
self._prepare_and_test(address6_mode='dhcpv6-stateless', n_subnets6=2)
@decorators.attr(type='slow')
@decorators.idempotent_id('dec222b1-180c-4098-b8c5-cc1b8342d611')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_multi_prefix_slaac(self):
self._prepare_and_test(address6_mode='slaac', n_subnets6=2)
@decorators.attr(type='slow')
@decorators.idempotent_id('b6399d76-4438-4658-bcf5-0d6c8584fde2')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_dualnet_slaac_from_os(self):
self._prepare_and_test(address6_mode='slaac', dualnet=True)
@decorators.attr(type='slow')
@decorators.idempotent_id('76f26acd-9688-42b4-bc3e-cd134c4cb09e')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_dualnet_dhcp6_stateless_from_os(self):
self._prepare_and_test(address6_mode='dhcpv6-stateless', dualnet=True)
@decorators.attr(type='slow')
@decorators.idempotent_id('cf1c4425-766b-45b8-be35-e2959728eb00')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_dualnet_multi_prefix_dhcpv6_stateless(self):
self._prepare_and_test(address6_mode='dhcpv6-stateless', n_subnets6=2,
dualnet=True)
@decorators.idempotent_id('9178ad42-10e4-47e9-8987-e02b170cc5cd')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_dualnet_multi_prefix_slaac(self):
self._prepare_and_test(address6_mode='slaac', n_subnets6=2,
dualnet=True)
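
A sketch of the polling style the IPv6 hunk above switches to; test_utils.call_until_true is assumed to forward extra positional arguments to the predicate, which removes the need for functools.partial. The helper name is hypothetical:

    def _wait_for_guest_address(self, ssh, ip):
        def guest_has_address(ssh_client, addr):
            return addr in ssh_client.exec_command("ip address")

        # Arguments after the timeout and sleep interval are passed
        # straight through to the predicate on every call.
        assigned = test_utils.call_until_true(
            guest_has_address, CONF.validation.ping_timeout, 1, ssh, ip)
        if not assigned:
            self.fail('Address %s was never configured on the guest' % ip)
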
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index 25e9f5c..cbe321e 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -13,14 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common import utils
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
class TestObjectStorageBasicOps(manager.ObjectStorageScenarioTest):
@decorators.idempotent_id('b920faf1-7b8a-4657-b9fe-9c4512bfb381')
- @test.services('object_storage')
+ @utils.services('object_storage')
def test_swift_basic_ops(self):
"""Test swift basic ops.
@@ -47,7 +47,7 @@
@decorators.idempotent_id('916c7111-cb1f-44b2-816d-8f760e4ea910')
@decorators.attr(type='slow')
- @test.services('object_storage')
+ @utils.services('object_storage')
def test_swift_acl_anonymous_download(self):
"""This test will cover below steps:
@@ -58,12 +58,18 @@
5. Delete the object and container
"""
container_name = self.create_container()
- obj_name, _ = self.upload_object_to_container(container_name)
+ obj_name, obj_data = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
resp, _ = self.object_client.raw_request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
-
- self.change_container_acl(container_name, '.r:*')
- resp, _ = self.object_client.raw_request(obj_url, 'GET')
+ metadata_param = {'X-Container-Read': '.r:*'}
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name, create_update_metadata=metadata_param,
+ create_update_metadata_prefix='')
+ resp, _ = self.container_client.list_container_metadata(container_name)
+ self.assertEqual(metadata_param['X-Container-Read'],
+ resp['x-container-read'])
+ resp, data = self.object_client.raw_request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
+ self.assertEqual(obj_data, data)
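
A sketch of granting anonymous read access through the container client used above; the method name and the empty metadata prefix come from the hunk, the surrounding test context is assumed:

    acl = {'X-Container-Read': '.r:*'}
    # With an empty prefix the header is sent verbatim instead of being
    # turned into an X-Container-Meta-* key.
    self.container_client.create_update_or_delete_container_metadata(
        container_name, create_update_metadata=acl,
        create_update_metadata_prefix='')
    # The ACL is reported back as a lower-cased response header.
    resp, _ = self.container_client.list_container_metadata(container_name)
    self.assertEqual('.r:*', resp['x-container-read'])
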
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 41c60f1..e39afe0 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -15,12 +15,13 @@
from oslo_log import log
import testtools
+from tempest.common import compute
+from tempest.common import utils
from tempest.common.utils import net_info
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
@@ -141,7 +142,7 @@
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
- if not test.is_extension_enabled('security-group', 'network'):
+ if not utils.is_extension_enabled('security-group', 'network'):
msg = "security-group extension not enabled."
raise cls.skipException(msg)
if CONF.network.shared_physical_network:
@@ -162,7 +163,7 @@
super(TestSecurityGroupsBasicOps, cls).resource_setup()
cls.multi_node = CONF.compute.min_compute_nodes > 1 and \
- test.is_scheduler_filter_enabled("DifferentHostFilter")
+ compute.is_scheduler_filter_enabled("DifferentHostFilter")
if cls.multi_node:
LOG.info("Working in Multi Node mode")
else:
@@ -470,7 +471,7 @@
servers=[tenant.access_point], client=client)
@decorators.idempotent_id('e79f879e-debb-440c-a7e4-efeda05b6848')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_cross_tenant_traffic(self):
if not self.credentials_provider.is_multi_tenant():
raise self.skipException("No secondary tenant defined")
@@ -490,7 +491,7 @@
raise
@decorators.idempotent_id('63163892-bbf6-4249-aa12-d5ea1f8f421b')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_in_tenant_traffic(self):
try:
self._create_tenant_servers(self.primary_tenant, num=1)
@@ -504,7 +505,7 @@
@decorators.idempotent_id('f4d556d7-1526-42ad-bafb-6bebf48568f6')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_port_update_new_security_group(self):
"""Verifies the traffic after updating the vm port
@@ -558,7 +559,7 @@
@decorators.idempotent_id('d2f77418-fcc4-439d-b935-72eca704e293')
@decorators.attr(type='slow')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_multiple_security_groups(self):
"""Verify multiple security groups and checks that rules
@@ -590,9 +591,9 @@
should_connect=True)
@decorators.attr(type='slow')
- @test.requires_ext(service='network', extension='port-security')
+ @utils.requires_ext(service='network', extension='port-security')
@decorators.idempotent_id('7c811dcc-263b-49a3-92d2-1b4d8405f50c')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_port_security_disable_security_group(self):
"""Verify the default security group rules is disabled."""
new_tenant = self.primary_tenant
@@ -630,7 +631,7 @@
raise
@decorators.attr(type='slow')
- @test.requires_ext(service='network', extension='port-security')
+ @utils.requires_ext(service='network', extension='port-security')
@decorators.idempotent_id('13ccf253-e5ad-424b-9c4a-97b88a026699')
# TODO(mriedem): We shouldn't actually need to check this since neutron
# disables the port_security extension by default, but the problem is nova
@@ -640,7 +641,7 @@
@testtools.skipUnless(
CONF.network_feature_enabled.port_security,
'Port security must be enabled.')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_boot_into_disabled_port_security_network_without_secgroup(self):
tenant = self.primary_tenant
self._create_tenant_network(tenant, port_security_enabled=False)
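
The recurring change across these files is that the service and extension helpers now live in tempest.common.utils rather than tempest.test. A minimal sketch of a test class written against the new location; the class name, test name and idempotent id are placeholders:

    from tempest.common import utils
    from tempest.lib import decorators
    from tempest.scenario import manager


    class ExampleScenarioTest(manager.ScenarioTest):

        @classmethod
        def skip_checks(cls):
            super(ExampleScenarioTest, cls).skip_checks()
            # Configuration-only skip, as recommended for skip_checks.
            if not utils.is_extension_enabled('security-group', 'network'):
                raise cls.skipException('security-group extension not enabled.')

        @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
        @utils.requires_ext(service='network', extension='port-security')
        @utils.services('compute', 'network')
        def test_example(self):
            pass
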
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 6d6318c..89b9fdd 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -16,11 +16,11 @@
from oslo_log import log as logging
import testtools
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -42,32 +42,10 @@
super(TestServerAdvancedOps, cls).setup_credentials()
@decorators.attr(type='slow')
- @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize is not available.')
- @test.services('compute', 'volume')
- def test_resize_volume_backed_server_confirm(self):
- # We create an instance for use in this test
- instance = self.create_server(volume_backed=True)
- instance_id = instance['id']
- resize_flavor = CONF.compute.flavor_ref_alt
- LOG.debug("Resizing instance %s from flavor %s to flavor %s",
- instance['id'], instance['flavor']['id'], resize_flavor)
- self.servers_client.resize_server(instance_id, resize_flavor)
- waiters.wait_for_server_status(self.servers_client, instance_id,
- 'VERIFY_RESIZE')
-
- LOG.debug("Confirming resize of instance %s", instance_id)
- self.servers_client.confirm_resize_server(instance_id)
-
- waiters.wait_for_server_status(self.servers_client, instance_id,
- 'ACTIVE')
-
- @decorators.attr(type='slow')
@decorators.idempotent_id('949da7d5-72c8-4808-8802-e3d70df98e2c')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
- @test.services('compute')
+ @utils.services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
instance_id = self.create_server()['id']
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 0c441ab..d5c378e 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -16,6 +16,7 @@
import json
import re
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
@@ -23,7 +24,6 @@
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -132,7 +132,7 @@
@decorators.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
@decorators.attr(type='smoke')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_server_basic_ops(self):
keypair = self.create_keypair()
security_group = self._create_security_group()
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index 552ab27..fdf875c 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -36,7 +36,7 @@
@decorators.idempotent_id('9cecbe35-b9d4-48da-a37e-7ce70aa43d30')
@decorators.attr(type='smoke')
- @test.services('compute', 'network')
+ @utils.services('compute', 'network')
def test_schedule_to_all_nodes(self):
available_zone = \
self.os_admin.availability_zone_client.list_availability_zones(
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index fc04b44..68f18d1 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -16,11 +16,11 @@
import testtools
from tempest.common import compute
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -78,7 +78,7 @@
@decorators.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
- @test.services('compute', 'network', 'image')
+ @utils.services('compute', 'network', 'image')
def test_shelve_instance(self):
self._create_server_then_shelve_and_unshelve()
@@ -86,6 +86,6 @@
@decorators.idempotent_id('c1b6318c-b9da-490b-9c67-9339b627271f')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
- @test.services('compute', 'volume', 'network', 'image')
+ @utils.services('compute', 'volume', 'network', 'image')
def test_shelve_volume_backed_instance(self):
self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 52767dc..b51a781 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,10 +15,10 @@
import testtools
+from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
@@ -44,7 +44,7 @@
@decorators.attr(type='slow')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
- @test.services('compute', 'network', 'image')
+ @utils.services('compute', 'network', 'image')
def test_snapshot_pattern(self):
# prepare for booting an instance
keypair = self.create_keypair()
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index debd664..ef369d6 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -16,14 +16,12 @@
from oslo_log import log as logging
import testtools
-from tempest.common import waiters
+from tempest.common import utils
from tempest import config
-from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -57,20 +55,6 @@
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- def _create_volume_snapshot(self, volume):
- snapshot_name = data_utils.rand_name('scenario-snapshot')
- snapshot = self.snapshots_client.create_snapshot(
- volume_id=volume['id'], display_name=snapshot_name)['snapshot']
- self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
- snapshot['id'])
- self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'available')
- waiters.wait_for_volume_resource_status(self.snapshots_client,
- snapshot['id'], 'available')
- self.assertEqual(snapshot_name, snapshot['name'])
- return snapshot
-
def _wait_for_volume_available_on_the_system(self, ip_address,
private_key):
ssh = self.get_remote_client(ip_address, private_key=private_key)
@@ -92,7 +76,7 @@
'Snapshotting is not available.')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
- @test.services('compute', 'network', 'volume', 'image')
+ @utils.services('compute', 'network', 'volume', 'image')
def test_stamp_pattern(self):
# prepare for booting an instance
keypair = self.create_keypair()
@@ -116,7 +100,7 @@
self.nova_volume_detach(server, volume)
# snapshot the volume
- volume_snapshot = self._create_volume_snapshot(volume)
+ volume_snapshot = self.create_volume_snapshot(volume['id'])
# snapshot the instance
snapshot_image = self.create_server_snapshot(server=server)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 3dfbf18..beb039c 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -13,18 +13,18 @@
from oslo_log import log as logging
import testtools
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
-class TestVolumeBootPattern(manager.ScenarioTest):
+class TestVolumeBootPattern(manager.EncryptionScenarioTest):
# Boot from volume scenario is quite slow, and needs extra
# breathing room to get through deletes in the time allotted.
@@ -69,21 +69,6 @@
return self.create_server(image_id='', **create_kwargs)
- def _create_snapshot_from_volume(self, vol_id):
- snap_name = data_utils.rand_name(
- self.__class__.__name__ + '-snapshot')
- snap = self.snapshots_client.create_snapshot(
- volume_id=vol_id,
- force=True,
- display_name=snap_name)['snapshot']
- self.addCleanup(
- self.snapshots_client.wait_for_resource_deletion, snap['id'])
- self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
- waiters.wait_for_volume_resource_status(self.snapshots_client,
- snap['id'], 'available')
- self.assertEqual(snap_name, snap['name'])
- return snap
-
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
@@ -91,7 +76,7 @@
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
- @test.services('compute', 'volume', 'image')
+ @utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
"""This test case attempts to reproduce the following steps:
@@ -147,7 +132,7 @@
# snapshot a volume
LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
- snapshot = self._create_snapshot_from_volume(volume_origin['id'])
+ snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)
# create a 3rd instance from snapshot
LOG.info("Creating third instance from snapshot: %s", snapshot['id'])
@@ -171,13 +156,13 @@
@decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
@decorators.attr(type='slow')
- @test.services('compute', 'image', 'volume')
+ @utils.services('compute', 'image', 'volume')
def test_create_server_from_volume_snapshot(self):
# Create a volume from an image
boot_volume = self._create_volume_from_image()
# Create a snapshot
- boot_snapshot = self._create_snapshot_from_volume(boot_volume['id'])
+ boot_snapshot = self.create_volume_snapshot(boot_volume['id'])
# Create a server from a volume snapshot
server = self._boot_instance_from_resource(
@@ -207,7 +192,7 @@
created_volume_info['attachments'][0]['volume_id'])
@decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
- @test.services('compute', 'volume', 'image')
+ @utils.services('compute', 'volume', 'image')
def test_create_ebs_image_and_check_boot(self):
# create an instance from volume
volume_origin = self._create_volume_from_image()
@@ -223,7 +208,41 @@
# boot instance from EBS image
instance = self.create_server(image_id=image['id'])
- # just ensure that instance booted
+
+ # Verify the server was created from the image
+ created_volume = instance['os-extended-volumes:volumes_attached']
+ self.assertNotEmpty(created_volume, "No volume attachment found.")
+ created_volume_info = self.volumes_client.show_volume(
+ created_volume[0]['id'])['volume']
+ self.assertEqual(instance['id'],
+ created_volume_info['attachments'][0]['server_id'])
+ self.assertEqual(created_volume[0]['id'],
+ created_volume_info['attachments'][0]['volume_id'])
+ self.assertEqual(
+ volume_origin['volume_image_metadata']['image_id'],
+ created_volume_info['volume_image_metadata']['image_id'])
# delete instance
self._delete_server(instance)
+
+ @decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
+ @testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
+ 'Encrypted volume attach is not supported')
+ @utils.services('compute', 'volume')
+ def test_boot_server_from_encrypted_volume_luks(self):
+ # Create an encrypted volume
+ volume = self.create_encrypted_volume('nova.volume.encryptors.'
+ 'luks.LuksEncryptor',
+ volume_type='luks')
+
+ self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
+
+ # Boot a server from the encrypted volume
+ server = self._boot_instance_from_resource(
+ source_id=volume['id'],
+ source_type='volume',
+ delete_on_termination=False)
+
+ server_info = self.servers_client.show_server(server['id'])['server']
+ created_volume = server_info['os-extended-volumes:volumes_attached']
+ self.assertEqual(volume['id'], created_volume[0]['id'])
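
Both volume scenarios above now go through the shared manager helper instead of private snapshot copies; a brief sketch of the calls, with the force keyword taken from the hunk and the surrounding test context assumed:

    # Snapshot a volume that is in the 'available' state.
    snapshot = self.create_volume_snapshot(volume['id'])

    # An attached (in-use) boot volume has to be snapshotted with force=True.
    boot_snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)
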
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index 81b71b1..cd10bbd 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -12,11 +12,11 @@
from oslo_log import log as logging
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
-from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -38,12 +38,6 @@
credentials = ['primary', 'admin']
@classmethod
- def setup_clients(cls):
- super(TestVolumeMigrateRetypeAttached, cls).setup_clients()
- cls.admin_volume_types_client = cls.os_admin.volume_types_v2_client
- cls.admin_volumes_client = cls.os_admin.volumes_v2_client
-
- @classmethod
def skip_checks(cls):
super(TestVolumeMigrateRetypeAttached, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
@@ -83,7 +77,7 @@
def _volume_retype_with_migration(self, volume_id, new_volume_type):
migration_policy = 'on-demand'
- self.admin_volumes_client.retype_volume(
+ self.volumes_client.retype_volume(
volume_id, new_type=new_volume_type,
migration_policy=migration_policy)
waiters.wait_for_volume_retype(self.volumes_client,
@@ -91,7 +85,7 @@
@decorators.attr(type='slow')
@decorators.idempotent_id('deadd2c2-beef-4dce-98be-f86765ff311b')
- @test.services('compute', 'volume')
+ @utils.services('compute', 'volume')
def test_volume_migrate_attached(self):
LOG.info("Creating keypair and security group")
keypair = self.create_keypair()
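
A short sketch of the retype-with-migration call after dropping the admin-only clients, assuming the waiter referenced above accepts the client, the volume id and the target type:

    # Retype the volume and migrate its data on demand using the primary
    # credentials' volumes client, then wait for the retype to finish.
    self.volumes_client.retype_volume(
        volume_id, new_type=new_volume_type,
        migration_policy='on-demand')
    waiters.wait_for_volume_retype(
        self.volumes_client, volume_id, new_volume_type)
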
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
deleted file mode 100644
index afedd36..0000000
--- a/tempest/services/object_storage/container_client.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from xml.etree import ElementTree as etree
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class ContainerClient(rest_client.RestClient):
-
- def create_container(
- self, container_name,
- metadata=None,
- remove_metadata=None,
- metadata_prefix='X-Container-Meta-',
- remove_metadata_prefix='X-Remove-Container-Meta-'):
- """Creates a container
-
- with optional metadata passed in as a dictionary
- """
- url = str(container_name)
- headers = {}
-
- if metadata is not None:
- for key in metadata:
- headers[metadata_prefix + key] = metadata[key]
- if remove_metadata is not None:
- for key in remove_metadata:
- headers[remove_metadata_prefix + key] = remove_metadata[key]
-
- resp, body = self.put(url, body=None, headers=headers)
- self.expected_success([201, 202], resp.status)
- return resp, body
-
- def delete_container(self, container_name):
- """Deletes the container (if it's empty)."""
- url = str(container_name)
- resp, body = self.delete(url)
- self.expected_success(204, resp.status)
- return resp, body
-
- def update_container_metadata(
- self, container_name,
- metadata=None,
- remove_metadata=None,
- metadata_prefix='X-Container-Meta-',
- remove_metadata_prefix='X-Remove-Container-Meta-'):
- """Updates arbitrary metadata on container."""
- url = str(container_name)
- headers = {}
-
- if metadata is not None:
- for key in metadata:
- headers[metadata_prefix + key] = metadata[key]
- if remove_metadata is not None:
- for key in remove_metadata:
- headers[remove_metadata_prefix + key] = remove_metadata[key]
-
- resp, body = self.post(url, body=None, headers=headers)
- self.expected_success(204, resp.status)
- return resp, body
-
- def delete_container_metadata(self, container_name, metadata,
- metadata_prefix='X-Remove-Container-Meta-'):
- """Deletes arbitrary metadata on container."""
- url = str(container_name)
- headers = {}
-
- if metadata is not None:
- for item in metadata:
- headers[metadata_prefix + item] = metadata[item]
-
- resp, body = self.post(url, body=None, headers=headers)
- self.expected_success(204, resp.status)
- return resp, body
-
- def list_container_metadata(self, container_name):
- """Retrieves container metadata headers"""
- url = str(container_name)
- resp, body = self.head(url)
- self.expected_success(204, resp.status)
- return resp, body
-
- def list_container_contents(self, container, params=None):
- """List the objects in a container, given the container name
-
- Returns the container object listing as a plain text list, or as
- xml or json if that option is specified via the 'format' argument.
-
- Optional Arguments:
- limit = integer
- For an integer value n, limits the number of results to at most
- n values.
-
- marker = 'string'
- Given a string value x, return object names greater in value
- than the specified marker.
-
- prefix = 'string'
- For a string value x, causes the results to be limited to names
- beginning with the substring x.
-
- format = 'json' or 'xml'
- Specify either json or xml to return the respective serialized
- response.
- If json, returns a list of json objects
- if xml, returns a string of xml
-
- path = 'string'
- For a string value x, return the object names nested in the
- pseudo path (assuming preconditions are met - see below).
-
- delimiter = 'character'
- For a character c, return all the object names nested in the
- container (without the need for the directory marker objects).
- """
-
- url = str(container)
- if params:
- url += '?'
- url += '&%s' % urllib.urlencode(params)
-
- resp, body = self.get(url, headers={})
- if params and params.get('format') == 'json':
- body = json.loads(body)
- elif params and params.get('format') == 'xml':
- body = etree.fromstring(body)
- # Else the content-type is plain/text
- else:
- body = [
- obj_name for obj_name in body.decode().split('\n') if obj_name
- ]
-
- self.expected_success([200, 204], resp.status)
- return resp, body
diff --git a/tempest/test.py b/tempest/test.py
index a81b5d7..9da85d5 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -14,7 +14,6 @@
# under the License.
import atexit
-import functools
import os
import sys
@@ -26,10 +25,10 @@
from tempest import clients
from tempest.common import credentials_factory as credentials
-from tempest.common import fixed_network
-import tempest.common.validation_resources as vresources
+from tempest.common import utils
from tempest import config
-from tempest.lib.common import cred_client
+from tempest.lib.common import fixed_network
+from tempest.lib.common import validation_resources as vr
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -44,119 +43,24 @@
version='Mitaka', removal_version='?')
-related_bug = debtcollector.moves.moved_function(
- decorators.related_bug, 'related_bug', __name__,
- version='Pike', removal_version='?')
-
-
attr = debtcollector.moves.moved_function(
decorators.attr, 'attr', __name__,
version='Pike', removal_version='?')
-class InvalidServiceTag(lib_exc.TempestException):
- message = "Invalid service tag"
+services = debtcollector.moves.moved_function(
+ utils.services, 'services', __name__,
+ version='Pike', removal_version='?')
-def get_service_list():
- service_list = {
- 'compute': CONF.service_available.nova,
- 'image': CONF.service_available.glance,
- 'volume': CONF.service_available.cinder,
- 'network': True,
- 'identity': True,
- 'object_storage': CONF.service_available.swift,
- }
- return service_list
+requires_ext = debtcollector.moves.moved_function(
+ utils.requires_ext, 'requires_ext', __name__,
+ version='Pike', removal_version='?')
-def services(*args):
- """A decorator used to set an attr for each service used in a test case
-
- This decorator applies a testtools attr for each service that gets
- exercised by a test case.
- """
- def decorator(f):
- known_services = get_service_list()
-
- for service in args:
- if service not in known_services:
- raise InvalidServiceTag('%s is not a valid service' % service)
- decorators.attr(type=list(args))(f)
-
- @functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
- service_list = get_service_list()
-
- for service in args:
- if not service_list[service]:
- msg = 'Skipped because the %s service is not available' % (
- service)
- raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
- return wrapper
- return decorator
-
-
-def requires_ext(**kwargs):
- """A decorator to skip tests if an extension is not enabled
-
- @param extension
- @param service
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*func_args, **func_kwargs):
- if not is_extension_enabled(kwargs['extension'],
- kwargs['service']):
- msg = "Skipped because %s extension: %s is not enabled" % (
- kwargs['service'], kwargs['extension'])
- raise testtools.TestCase.skipException(msg)
- return func(*func_args, **func_kwargs)
- return wrapper
- return decorator
-
-
-def is_extension_enabled(extension_name, service):
- """A function that will check the list of enabled extensions from config
-
- """
- config_dict = {
- 'compute': CONF.compute_feature_enabled.api_extensions,
- 'volume': CONF.volume_feature_enabled.api_extensions,
- 'network': CONF.network_feature_enabled.api_extensions,
- 'object': CONF.object_storage_feature_enabled.discoverable_apis,
- 'identity': CONF.identity_feature_enabled.api_extensions
- }
- if not config_dict[service]:
- return False
- if config_dict[service][0] == 'all':
- return True
- if extension_name in config_dict[service]:
- return True
- return False
-
-
-def is_scheduler_filter_enabled(filter_name):
- """Check the list of enabled compute scheduler filters from config.
-
- This function checks whether the given compute scheduler filter is
- available and configured in the config file. If the
- scheduler_available_filters option is set to 'all' (Default value. which
- means default filters are configured in nova) in tempest.conf then, this
- function returns True with assumption that requested filter 'filter_name'
- is one of available filter in nova ("nova.scheduler.filters.all_filters").
- """
-
- filters = CONF.compute_feature_enabled.scheduler_available_filters
- if not filters:
- return False
- if 'all' in filters:
- return True
- if filter_name in filters:
- return True
- return False
-
+is_extension_enabled = debtcollector.moves.moved_function(
+ utils.is_extension_enabled, 'is_extension_enabled', __name__,
+ version='Pike', removal_version='?')
at_exit_set = set()
@@ -193,16 +97,24 @@
- resource_cleanup
"""
- setUpClassCalled = False
-
# NOTE(andreaf) credentials holds a list of the credentials to be allocated
# at class setup time. Credential types can be 'primary', 'alt', 'admin' or
# a list of roles - the first element of the list being a label, and the
# rest the actual roles
credentials = []
+
+ # Track if setUpClass was invoked
+ __setupclass_called = False
+
+ # Network resources to be provisioned for the requested test credentials.
+ # Only used with the dynamic credentials provider.
+ _network_resources = {}
+
+ # Stack of resource cleanups
+ _class_cleanups = []
+
# Resources required to validate a server using ssh
- validation_resources = {}
- network_resources = {}
+ _validation_resources = {}
# NOTE(sdague): log_format is defined inline here instead of using the oslo
# default because going through the config path recouples config to the
@@ -217,23 +129,39 @@
TIMEOUT_SCALING_FACTOR = 1
@classmethod
+ def _reset_class(cls):
+ cls.__setup_credentials_called = False
+ cls.__resource_cleanup_called = False
+ cls.__skip_checks_called = False
+ # Stack of callable to be invoked in reverse order
+ cls._class_cleanups = []
+ # Stack of (name, callable) to be invoked in reverse order at teardown
+ cls._teardowns = []
+
+ @classmethod
def setUpClass(cls):
+ cls.__setupclass_called = True
+ # Reset state
+ cls._reset_class()
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
- cls.setUpClassCalled = True
- # Stack of (name, callable) to be invoked in reverse order at teardown
- cls.teardowns = []
# All the configuration checks that may generate a skip
cls.skip_checks()
+ if not cls.__skip_checks_called:
+ raise RuntimeError("skip_checks for %s did not call the super's "
+ "skip_checks" % cls.__name__)
try:
# Allocation of all required credentials and client managers
- cls.teardowns.append(('credentials', cls.clear_credentials))
+ cls._teardowns.append(('credentials', cls.clear_credentials))
cls.setup_credentials()
+ if not cls.__setup_credentials_called:
+ raise RuntimeError("setup_credentials for %s did not call the "
+ "super's setup_credentials" % cls.__name__)
# Shortcuts to clients
cls.setup_clients()
# Additional class-wide test resources
- cls.teardowns.append(('resources', cls.resource_cleanup))
+ cls._teardowns.append(('resources', cls.resource_cleanup))
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
@@ -260,18 +188,29 @@
# If there was no exception during setup we shall re-raise the first
# exception in teardown
re_raise = (etype is None)
- while cls.teardowns:
- name, teardown = cls.teardowns.pop()
+ while cls._teardowns:
+ name, teardown = cls._teardowns.pop()
# Catch any exception in tearDown so we can re-raise the original
# exception at the end
try:
teardown()
+ if name == 'resources':
+ if not cls.__resource_cleanup_called:
+ raise RuntimeError(
+ "resource_cleanup for %s did not call the "
+ "super's resource_cleanup" % cls.__name__)
except Exception as te:
sys_exec_info = sys.exc_info()
tetype = sys_exec_info[0]
- # TODO(andreaf): Till we have the ability to cleanup only
- # resources that were successfully setup in resource_cleanup,
- # log AttributeError as info instead of exception.
+ # TODO(andreaf): Resource cleanup is often implemented by
+ # storing an array of resources at class level, and cleaning
+ # them up during `resource_cleanup`.
+ # In case of failure during setup, some resource arrays might
+ # not be defined at all, in which case the cleanup code might
+ # trigger an AttributeError. In such cases we log
+ # AttributeError as info instead of exception. Once all
+ # cleanups are migrated to addClassResourceCleanup we can
+ # remove this.
if tetype is AttributeError and name == 'resources':
LOG.info("tearDownClass of %s failed: %s", name, te)
else:
@@ -307,18 +246,45 @@
"""Class level skip checks.
Subclasses verify in here all conditions that might prevent the
- execution of the entire test class.
- Checks implemented here may not make use API calls, and should rely on
- configuration alone.
- In general skip checks that require an API call are discouraged.
- If one is really needed it may be implemented either in the
- resource_setup or at test level.
+ execution of the entire test class. Skipping here prevents any other
+ class fixture from being executed i.e. no credentials or other
+ resource allocation will happen.
+
+ Tests defined in the test class will no longer appear in test results.
+ The `setUpClass` for the entire test class will be marked as SKIPPED
+ instead.
+
+ At this stage no test credentials are available, so skip checks
+ should rely on configuration alone. This is deliberate since skips
+ based on the result of an API call are discouraged.
+
+ The following checks are implemented in `test.py` already:
+ - check that alt credentials are available when requested by the test
+ - check that admin credentials are available when requested by the test
+ - check that the identity version specified by the test is marked as
+ enabled in the configuration
+
+ Overriders of skip_checks must always invoke skip_checks on `super`
+ first.
+
+ Example::
+
+ @classmethod
+ def skip_checks(cls):
+ super(Example, cls).skip_checks()
+ if not CONF.service_available.my_service:
+ skip_msg = ("%s skipped as my_service is not available")
+ raise cls.skipException(skip_msg % cls.__name__)
"""
+ cls.__skip_checks_called = True
identity_version = cls.get_identity_version()
- if 'admin' in cls.credentials and not credentials.is_admin_available(
- identity_version=identity_version):
- msg = "Missing Identity Admin API credentials in configuration."
- raise cls.skipException(msg)
+ # setting force_tenant_isolation to True also needs admin credentials.
+ if ('admin' in cls.credentials or
+ getattr(cls, 'force_tenant_isolation', False)):
+ if not credentials.is_admin_available(
+ identity_version=identity_version):
+ raise cls.skipException(
+ "Missing Identity Admin API credentials in configuration.")
if 'alt' in cls.credentials and not credentials.is_alt_available(
identity_version=identity_version):
msg = "Missing a 2nd set of API credentials in configuration."
@@ -335,13 +301,67 @@
def setup_credentials(cls):
"""Allocate credentials and create the client managers from them.
- For every element of credentials param function creates tenant/user,
- Then it creates client manager for that credential.
+ `setup_credentials` looks for the content of the `credentials`
+ attribute in the test class. If the value is a non-empty collection,
+ a credentials provider is set up, and credentials are provisioned or
+ allocated based on the content of the collection. Every set of
+ credentials is associated to an object of type `cls.client_manager`.
+ The client manager is accessible by tests via class attribute
+ `os_[type]`:
- Network related tests must override this function with
- set_network_resources() method, otherwise it will create
- network resources(network resources are created in a later step).
+ Valid values in `credentials` are:
+ - 'primary':
+ A normal user is provisioned.
+ It can be used only once. Multiple entries will be ignored.
+ Clients are available at os_primary.
+ - 'alt':
+ A normal user other than 'primary' is provisioned.
+ It can be used only once. Multiple entries will be ignored.
+ Clients are available at os_alt.
+ - 'admin':
+ An admin user is provisioned.
+ It can be used only once. Multiple entries will be ignored.
+ Clients are available at os_admin.
+ - A list in the format ['any_label', 'role1', ... , 'roleN']:
+ A client with roles <list>[1:] is provisioned.
+ It can be used multiple times, with unique labels.
+ Clients are available at os_roles_<list>[0].
+
+ By default network resources are allocated (in case of dynamic
+ credentials). Tests that do not need network or that require a
+ custom network setup must specify which network resources shall
+ be provisioned using the `set_network_resources()` method (note
+ that it must be invoked before the `setup_credentials` is
+ invoked on super).
+
+ Example::
+
+ class TestWithCredentials(test.BaseTestCase):
+
+ credentials = ['primary', 'admin',
+ ['special', 'special_role1']]
+
+ @classmethod
+ def setup_credentials(cls):
+ # set_network_resources must be called first
+ cls.set_network_resources(network=True)
+ super(TestWithCredentials, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ cls.servers = cls.os_primary.compute.ServersClient()
+ cls.admin_servers = cls.os_admin.compute.ServersClient()
+ # certain API calls may require a user with a specific
+ # role assigned. In this example `special_role1` is
+ # assigned to the user in `cls.os_roles_special`.
+ cls.special_servers = (
+ cls.os_roles_special.compute.ServersClient())
+
+ def test_special_servers(self):
+ # Do something with servers
+ pass
"""
+ cls.__setup_credentials_called = True
for credentials_type in cls.credentials:
# This may raise an exception in case credentials are not available
# In that case we want to let the exception through and the test
@@ -383,41 +403,184 @@
@classmethod
def setup_clients(cls):
- """Create links to the clients into the test object."""
- # TODO(andreaf) There is a fair amount of code that could me moved from
- # base / test classes in here. Ideally tests should be able to only
- # specify which client is `client` and nothing else.
+ """Create aliases to the clients in the client managers.
+
+ `setup_clients` is invoked after the credential provisioning step.
+ Client manager objects are available to tests already. The purpose
+ of this helper is to set up shortcuts to specific clients that are
+ useful for the tests implemented in the test class.
+
+ Its purpose is mostly for code readability, however it should be used
+ carefully to avoid doing exactly the opposite, i.e. making the code
+ unreadable and hard to debug. If aliases are defined in a super class
+ it won't be obvious what they refer to, so it's good practice to define
+ all aliases used in the class. Aliases are meant to be shortcuts to
+ be used in tests, not shortcuts to avoid helper method attributes.
+ If a helper method starts relying on a client alias and a subclass
+ overrides that alias, it will become rather difficult to understand
+ what the helper method actually does.
+
+ Example::
+
+ class TestDoneItRight(test.BaseTestCase):
+
+ credentials = ['primary', 'alt']
+
+ @classmethod
+ def setup_clients(cls):
+ super(TestDoneItRight, cls).setup_clients()
+ cls.servers = cls.os_primary.ServersClient()
+ cls.servers_alt = cls.os_alt.ServersClient()
+
+ def _a_good_helper(self, clients):
+ # Some complex logic we're going to use many times
+ servers = clients.ServersClient()
+ vm = servers.create_server(...)
+
+ def delete_server():
+ test_utils.call_and_ignore_notfound_exc(
+ servers.delete_server, vm['id'])
+
+ self.addCleanup(delete_server)
+ return vm
+
+ def test_with_servers(self):
+ vm = self._a_good_helper(self.os_primary)
+ vm_alt = self._a_good_helper(self.os_alt)
+ self.servers.show_server(vm['id'])
+ self.servers_alt.show_server(vm_alt['id'])
+ """
pass
@classmethod
def resource_setup(cls):
- """Class level resource setup for test cases."""
- if hasattr(cls, "os_primary"):
- cls.validation_resources = vresources.create_validation_resources(
- cls.os_primary, cls.validation_resources)
- else:
- LOG.warning("Client manager not found, validation resources not"
- " created")
+ """Class level resource setup for test cases.
+
+ `resource_setup` is invoked once all credentials (and related network
+ resources) have been provisioned and after client aliases - if any -
+ have been defined.
+
+ The use case for `resource_setup` is test optimization: provisioning
+ of project-specific "expensive" resources that are not dirtied by tests
+ and can thus safely be re-used by multiple tests.
+
+ System wide resources shared by all tests could instead be provisioned
+ only once, before the test run.
+
+ Resources provisioned here must be cleaned up during
+ `resource_cleanup`. This is best achieved by scheduling a cleanup via
+ `addClassResourceCleanup`.
+
+ Some test resources have an asynchronous delete process. It's best
+ practice for them to schedule a wait for delete via
+ `addClassResourceCleanup` to avoid having resources in process of
+ deletion when we reach the credentials cleanup step.
+
+ Example::
+
+ @classmethod
+ def resource_setup(cls):
+ super(MyTest, cls).resource_setup()
+ servers = cls.os_primary.compute.ServersClient()
+ # Schedule delete and wait so that we can first delete the
+ # two servers and then wait for both to delete
+ # Create server 1
+ cls.shared_server = servers.create_server()
+ # Create server 2. If something goes wrong we schedule cleanup
+ # of server 1 anyways.
+ try:
+ cls.shared_server2 = servers.create_server()
+ # Wait server 2
+ cls.addClassResourceCleanup(
+ waiters.wait_for_server_termination,
+ servers, cls.shared_server2['id'],
+ ignore_error=False)
+ finally:
+ # Wait server 1
+ cls.addClassResourceCleanup(
+ waiters.wait_for_server_termination,
+ servers, cls.shared_server['id'],
+ ignore_error=False)
+ # Delete server 1
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ servers.delete_server,
+ cls.shared_server['id'])
+ # Delete server 2 (if it was created)
+ if hasattr(cls, 'shared_server2'):
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ servers.delete_server,
+ cls.shared_server2['id'])
+ """
+ pass
@classmethod
def resource_cleanup(cls):
"""Class level resource cleanup for test cases.
- Resource cleanup must be able to handle the case of partially setup
- resources, in case a failure during `resource_setup` should happen.
+ Resource cleanup processes the stack of cleanups produced by
+ `addClassResourceCleanup` and then cleans up validation resources
+ if any were provisioned.
+
+ All cleanups are processed whatever the outcome. Exceptions are
+ accumulated and re-raised as a `MultipleExceptions` at the end.
+
+ In most cases test cases won't need to override `resource_cleanup`,
+ but if they do they must invoke `resource_cleanup` on super.
+
+ Example::
+
+ class TestWithReallyComplexCleanup(test.BaseTestCase):
+
+ @classmethod
+ def resource_setup(cls):
+ # provision resource A
+ cls.addClassResourceCleanup(delete_resource, A)
+ # provision resource B
+ cls.addClassResourceCleanup(delete_resource, B)
+
+ @classmethod
+ def resource_cleanup(cls):
+ # It's possible to override resource_cleanup but in most
+ # cases it shouldn't be required. Nothing that may fail
+ # should be executed before the call to super since it
+ # might cause resource leak in case of error.
+ super(TestWithReallyComplexCleanup, cls).resource_cleanup()
+ # At this point test credentials are still available but
+ # anything from the cleanup stack has been already deleted.
"""
- if cls.validation_resources:
- if hasattr(cls, "os_primary"):
- vresources.clear_validation_resources(cls.os_primary,
- cls.validation_resources)
- cls.validation_resources = {}
- else:
- LOG.warning("Client manager not found, validation resources "
- "not deleted")
+ cls.__resource_cleanup_called = True
+ cleanup_errors = []
+ while cls._class_cleanups:
+ try:
+ fn, args, kwargs = cls._class_cleanups.pop()
+ fn(*args, **kwargs)
+ except Exception:
+ cleanup_errors.append(sys.exc_info())
+ if cleanup_errors:
+ raise testtools.MultipleExceptions(*cleanup_errors)
+
+ @classmethod
+ def addClassResourceCleanup(cls, fn, *arguments, **keywordArguments):
+ """Add a cleanup function to be called during resource_cleanup.
+
+ Functions added with addClassResourceCleanup will be called in reverse
+ order of adding at the beginning of resource_cleanup, before any
+ credential, networking or validation resources cleanup is processed.
+
+ If a function added with addClassResourceCleanup raises an exception,
+ the error will be recorded as a test error, and the next cleanup will
+ then be run.
+
+ Cleanup functions are always called during the test class tearDown
+ fixture, even if an exception occurred during setUp or tearDown.
+ """
+ cls._class_cleanups.append((fn, arguments, keywordArguments))
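The cleanup stack registered here is consumed LIFO by `resource_cleanup` above, and every callback runs even if earlier ones fail (errors are re-raised together as `MultipleExceptions`). A minimal, self-contained sketch of those semantics, using plain Python lists instead of the real class attributes::

    calls = []
    cleanups = []

    def add_cleanup(fn, *args, **kwargs):
        # Mirrors addClassResourceCleanup: push the callback onto a stack
        cleanups.append((fn, args, kwargs))

    add_cleanup(calls.append, 'delete resource A')  # registered first
    add_cleanup(calls.append, 'delete resource B')  # registered second

    # Mirrors resource_cleanup: pop and run in reverse order of adding
    while cleanups:
        fn, args, kwargs = cleanups.pop()
        fn(*args, **kwargs)

    assert calls == ['delete resource B', 'delete resource A']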
def setUp(self):
super(BaseTestCase, self).setUp()
- if not self.setUpClassCalled:
+ if not self.__setupclass_called:
raise RuntimeError("setUpClass does not call the super's "
"setUpClass in the "
+ self.__class__.__name__)
@@ -448,37 +611,6 @@
def credentials_provider(self):
return self._get_credentials_provider()
- @property
- def identity_utils(self):
- """A client that abstracts v2 and v3 identity operations.
-
- This can be used for creating and tearing down projects in tests. It
- should not be used for testing identity features.
- """
- if CONF.identity.auth_version == 'v2':
- client = self.os_admin.identity_client
- users_client = self.os_admin.users_client
- project_client = self.os_admin.tenants_client
- roles_client = self.os_admin.roles_client
- domains_client = None
- else:
- client = self.os_admin.identity_v3_client
- users_client = self.os_admin.users_v3_client
- project_client = self.os_admin.projects_client
- roles_client = self.os_admin.roles_v3_client
- domains_client = self.os_admin.domains_client
-
- try:
- domain = client.auth_provider.credentials.project_domain_name
- except AttributeError:
- domain = 'Default'
-
- return cred_client.get_creds_client(client, project_client,
- users_client,
- roles_client,
- domains_client,
- project_domain_name=domain)
-
@classmethod
def get_identity_version(cls):
"""Returns the identity version used by the test class"""
@@ -500,7 +632,7 @@
False)
cls._creds_provider = credentials.get_credentials_provider(
- name=cls.__name__, network_resources=cls.network_resources,
+ name=cls.__name__, network_resources=cls._network_resources,
force_tenant_isolation=force_tenant_isolation)
return cls._creds_provider
@@ -555,62 +687,131 @@
if hasattr(cls, '_creds_provider'):
cls._creds_provider.clear_creds()
+ @staticmethod
+ def _validation_resources_params_from_conf():
+ return dict(
+ keypair=(CONF.validation.auth_method.lower() == "keypair"),
+ floating_ip=(CONF.validation.connect_method.lower() == "floating"),
+ security_group=CONF.validation.security_group,
+ security_group_rules=CONF.validation.security_group_rules,
+ use_neutron=CONF.service_available.neutron,
+ ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
+ floating_network_id=CONF.network.public_network_id,
+ floating_network_name=CONF.network.floating_network_name)
+
@classmethod
- def set_validation_resources(cls, keypair=None, floating_ip=None,
- security_group=None,
- security_group_rules=None):
- """Specify which ssh server validation resources should be created.
+ def get_class_validation_resources(cls, os_clients):
+ """Provision validation resources according to configuration
- Each of the argument must be set to either None, True or False, with
- None - use default from config (security groups and security group
- rules get created when set to None)
- False - Do not create the validation resource
- True - create the validation resource
+ This is a wrapper around `create_validation_resources` from
+ `tempest.common.validation_resources` that passes parameters from
+ Tempest configuration. Only one instance of class level
+ validation resources is managed by the helper, so if resources
+ were already provisioned, the existing ones will be returned.
- @param keypair
- @param security_group
- @param security_group_rules
- @param floating_ip
+ Resources are returned as a dictionary. They are also scheduled for
+ automatic cleanup during class teardown using
+ `addClassResourceCleanup`.
+
+ If `CONF.validation.run_validation` is False no resource will be
+ provisioned at all.
+
+ @param os_clients: Clients to be used to provision the resources.
"""
if not CONF.validation.run_validation:
return
- if keypair is None:
- keypair = (CONF.validation.auth_method.lower() == "keypair")
+ if os_clients in cls._validation_resources:
+ return cls._validation_resources[os_clients]
- if floating_ip is None:
- floating_ip = (CONF.validation.connect_method.lower() ==
- "floating")
+ if (CONF.validation.ip_version_for_ssh not in (4, 6) and
+ CONF.service_available.neutron):
+ msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
+ raise lib_exc.InvalidConfiguration(
+ msg % CONF.validation.ip_version_for_ssh)
- if security_group is None:
- security_group = CONF.validation.security_group
+ resources = vr.create_validation_resources(
+ os_clients,
+ **cls._validation_resources_params_from_conf())
- if security_group_rules is None:
- security_group_rules = CONF.validation.security_group_rules
+ cls.addClassResourceCleanup(
+ vr.clear_validation_resources, os_clients,
+ use_neutron=CONF.service_available.neutron,
+ **resources)
+ cls._validation_resources[os_clients] = resources
+ return resources
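A hedged sketch of how a test class might consume the dictionary returned by this helper; the key names ('keypair', 'security_group', 'floating_ip') mirror what `create_validation_resources` produces when the corresponding options are enabled, while `MyComputeTest` and the server-creation comment are illustrative placeholders::

    class MyComputeTest(test.BaseTestCase):

        @classmethod
        def resource_setup(cls):
            super(MyComputeTest, cls).resource_setup()
            # Provisioned once per class; cleaned up automatically
            # during resource_cleanup via addClassResourceCleanup.
            cls.validation_resources = cls.get_class_validation_resources(
                cls.os_primary)
            # e.g. cls.validation_resources['keypair']['name'] can then
            # be passed to whatever server-creation helper performs the
            # SSH validation.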
- if not cls.validation_resources:
- cls.validation_resources = {
- 'keypair': keypair,
- 'security_group': security_group,
- 'security_group_rules': security_group_rules,
- 'floating_ip': floating_ip}
+ def get_test_validation_resources(self, os_clients):
+ """Returns a dict of validation resources according to configuration
+
+ Initialise a validation resources fixture based on configuration,
+ start the fixture and return the validation resources.
+
+ If `CONF.validation.run_validation` is False no resource will be
+ provisioned at all.
+
+ @param os_clients: Clients to be used to provision the resources.
+ """
+
+ params = {}
+ # The test will try to use the fixture, so for this to be useful
+ # we must always return one. If validation is disabled, though,
+ # we don't need to provision anything, which is the default
+ # behavior for the fixture.
+ if CONF.validation.run_validation:
+ params = self._validation_resources_params_from_conf()
+
+ validation = self.useFixture(
+ vr.ValidationResourcesFixture(os_clients, **params))
+ return validation.resources
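For per-test resources the pattern is similar but instance level; a sketch assuming a hypothetical test method, with `create_test_server` from `tempest.common.compute` as one possible consumer of the returned dict::

    def test_server_is_reachable(self):
        # Provisioned via a fixture, so everything is cleaned up
        # automatically when this test finishes.
        validation_resources = self.get_test_validation_resources(
            self.os_primary)
        body, servers = compute.create_test_server(
            self.os_primary, validatable=True,
            validation_resources=validation_resources,
            wait_until='ACTIVE')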
@classmethod
def set_network_resources(cls, network=False, router=False, subnet=False,
dhcp=False):
"""Specify which network resources should be created
+ The dynamic credentials provider by default provisions network
+ resources for each user/project that is provisioned. This behavior
+ can be altered using this method, which allows tests to define which
+ specific network resources are provisioned - none if no parameter
+ is specified.
+
+ This method is designed so that only the network resources set on the
+ leaf class are honoured.
+
+ Credentials are provisioned as part of the class setup fixture,
+ during the `setup_credentials` step. For this to be effective this
+ helper must be invoked before super's `setup_credentials` is executed.
+
@param network
@param router
@param subnet
@param dhcp
+
+ Example::
+
+ @classmethod
+ def setup_credentials(cls):
+ # Do not setup network resources for this test
+ cls.set_network_resources()
+ super(MyTest, cls).setup_credentials()
"""
- # network resources should be set only once from callers
+ # If this is invoked after the credentials are set up, it won't have
+ # any effect. To avoid this situation, fail the test in case this was
+ # invoked too late in the test lifecycle.
+ if cls.__setup_credentials_called:
+ raise RuntimeError(
+ "set_network_resources invoked after setup_credentials on the "
+ "super class has been already invoked. For "
+ "set_network_resources to have effect please invoke it before "
+ "the call to super().setup_credentials")
+
+ # Network resources should be set only once from callers
# in order to ensure that even if it's called multiple times in
# a chain of overloaded methods, the attribute is set only
- # in the leaf class
- if not cls.network_resources:
- cls.network_resources = {
+ # in the leaf class.
+ if not cls._network_resources:
+ cls._network_resources = {
'network': network,
'router': router,
'subnet': subnet,
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 1206e3f..9c18052 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -76,7 +76,7 @@
conf.register_opt(my_config.service_option,
group='service_available')
conf.register_group(my_config.my_service_group)
- conf.register_opts(my_config.MyService +
+ conf.register_opts(my_config.MyServiceGroup,
my_config.my_service_group)
conf.register_group(my_config.my_service_feature_group)
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
index e54d4c0..0587e7b 100644
--- a/tempest/tests/README.rst
+++ b/tempest/tests/README.rst
@@ -7,16 +7,16 @@
---------------------
Unit tests are the self checks for Tempest. They provide functional
-verification and regression checking for the internal components of tempest.
-They should be used to just verify that the individual pieces of tempest are
+verification and regression checking for the internal components of Tempest.
+They should be used to just verify that the individual pieces of Tempest are
working as expected. They should not require an external service to be running
-and should be able to run solely from the tempest tree.
+and should be able to run solely from the Tempest tree.
-Why are these tests in tempest?
+Why are these tests in Tempest?
-------------------------------
These tests exist to make sure that the mechanisms that we use inside of
-tempest to are valid and remain functional. They are only here for self
-validation of tempest.
+Tempest are valid and remain functional. They are only here for self
+validation of Tempest.
Scope of these tests
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 6345728..5024100 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -37,14 +37,16 @@
fake_image = mock.Mock(response={'location': image_id})
compute_images_client.create_image.return_value = fake_image
# call the utility method
- image = compute_base.BaseV2ComputeTest.create_image_from_server(
- mock.sentinel.server_id, name='fake-snapshot-name')
+ cleanup_path = 'tempest.test.BaseTestCase.addClassResourceCleanup'
+ with mock.patch(cleanup_path) as mock_cleanup:
+ image = compute_base.BaseV2ComputeTest.create_image_from_server(
+ mock.sentinel.server_id, name='fake-snapshot-name')
self.assertEqual(fake_image, image)
# make our assertions
compute_images_client.create_image.assert_called_once_with(
mock.sentinel.server_id, name='fake-snapshot-name')
- self.assertEqual(1, len(compute_base.BaseV2ComputeTest.images))
- self.assertEqual(image_id, compute_base.BaseV2ComputeTest.images[0])
+ mock_cleanup.assert_called_once()
+ self.assertIn(image_id, mock_cleanup.call_args[0])
@mock.patch.multiple(compute_base.BaseV2ComputeTest,
compute_images_client=mock.DEFAULT,
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index 6773b2f..8bf4c5b 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -20,7 +20,6 @@
from tempest import config
from tempest.tests import base
from tempest.tests import fake_config
-from tempest.tests.lib import fake_identity
class FakeOpts(object):
@@ -45,6 +44,7 @@
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.opts = FakeOpts(version=identity_version)
+ self.patch('oslo_log.log.setup', autospec=True)
def mock_resource_creation(self):
fake_resource = dict(id='id', name='name')
@@ -85,14 +85,10 @@
class TestAccountGeneratorV2(base.TestCase, MockHelpersMixin):
identity_version = 2
- identity_response = fake_identity._fake_v2_response
def setUp(self):
super(TestAccountGeneratorV2, self).setUp()
self.mock_config_and_opts(self.identity_version)
- self.useFixture(fixtures.MockPatch(
- 'tempest.lib.auth.AuthProvider.set_auth',
- return_value=self.identity_response))
def test_get_credential_provider(self):
cp = account_generator.get_credential_provider(self.opts)
@@ -115,7 +111,6 @@
class TestAccountGeneratorV3(TestAccountGeneratorV2):
identity_version = 3
- identity_response = fake_identity._fake_v3_response
def setUp(self):
super(TestAccountGeneratorV3, self).setUp()
@@ -145,16 +140,13 @@
class TestGenerateResourcesV2(base.TestCase, MockHelpersMixin):
identity_version = 2
- identity_response = fake_identity._fake_v2_response
cred_client = 'tempest.lib.common.cred_client.V2CredsClient'
- dynamic_creds = 'tempest.common.dynamic_creds.DynamicCredentialProvider'
+ dynamic_creds = ('tempest.lib.common.dynamic_creds.'
+ 'DynamicCredentialProvider')
def setUp(self):
super(TestGenerateResourcesV2, self).setUp()
self.mock_config_and_opts(self.identity_version)
- self.useFixture(fixtures.MockPatch(
- 'tempest.lib.auth.AuthProvider.set_auth',
- return_value=self.identity_response))
self.cred_provider = account_generator.get_credential_provider(
self.opts)
self.mock_resource_creation()
@@ -244,7 +236,6 @@
class TestGenerateResourcesV3(TestGenerateResourcesV2):
identity_version = 3
- identity_response = fake_identity._fake_v3_response
cred_client = 'tempest.lib.common.cred_client.V3CredsClient'
def setUp(self):
@@ -255,17 +246,14 @@
class TestDumpAccountsV2(base.TestCase, MockHelpersMixin):
identity_version = 2
- identity_response = fake_identity._fake_v2_response
cred_client = 'tempest.lib.common.cred_client.V2CredsClient'
- dynamic_creds = 'tempest.common.dynamic_creds.DynamicCredentialProvider'
+ dynamic_creds = ('tempest.lib.common.dynamic_creds.'
+ 'DynamicCredentialProvider')
domain_is_in = False
def setUp(self):
super(TestDumpAccountsV2, self).setUp()
self.mock_config_and_opts(self.identity_version)
- self.useFixture(fixtures.MockPatch(
- 'tempest.lib.auth.AuthProvider.set_auth',
- return_value=self.identity_response))
self.cred_provider = account_generator.get_credential_provider(
self.opts)
self.mock_resource_creation()
@@ -337,7 +325,6 @@
class TestDumpAccountsV3(TestDumpAccountsV2):
identity_version = 3
- identity_response = fake_identity._fake_v3_response
cred_client = 'tempest.lib.common.cred_client.V3CredsClient'
domain_is_in = True
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 7ac347d..0485e14 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -13,6 +13,7 @@
# under the License.
import argparse
+import atexit
import os
import shutil
import subprocess
@@ -25,6 +26,7 @@
from tempest.tests import base
DEVNULL = open(os.devnull, 'wb')
+atexit.register(DEVNULL.close)
class TestTempestRun(base.TestCase):
@@ -38,6 +40,7 @@
setattr(args, "subunit", True)
setattr(args, "parallel", False)
setattr(args, "concurrency", 10)
+ setattr(args, "load_list", '')
options = self.run_cmd._build_options(args)
self.assertEqual(['--subunit',
'--concurrency=10'],
@@ -68,6 +71,34 @@
self.assertEqual('i_am_a_fun_little_regex',
self.run_cmd._build_regex(args))
+ def test__build_whitelist_file(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, 'smoke', False)
+ setattr(args, 'regex', None)
+ self.tests = tempfile.NamedTemporaryFile(
+ prefix='whitelist', delete=False)
+ self.tests.write(b"volume \n compute")
+ self.tests.close()
+ setattr(args, 'whitelist_file', self.tests.name)
+ setattr(args, 'blacklist_file', None)
+ self.assertEqual("volume|compute",
+ self.run_cmd._build_regex(args))
+ os.unlink(self.tests.name)
+
+ def test__build_blacklist_file(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, 'smoke', False)
+ setattr(args, 'regex', None)
+ self.tests = tempfile.NamedTemporaryFile(
+ prefix='blacklist', delete=False)
+ self.tests.write(b"volume \n compute")
+ self.tests.close()
+ setattr(args, 'whitelist_file', None)
+ setattr(args, 'blacklist_file', self.tests.name)
+ self.assertEqual("^((?!compute|volume).)*$",
+ self.run_cmd._build_regex(args))
+ os.unlink(self.tests.name)
+
class TestRunReturnCode(base.TestCase):
def setUp(self):
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index b0e74fb..8641b63 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -16,9 +16,13 @@
import mock
from oslo_serialization import jsonutils as json
+from tempest import clients
from tempest.cmd import verify_tempest_config
+from tempest.common import credentials_factory
from tempest import config
+from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
from tempest.tests import base
from tempest.tests import fake_config
@@ -172,22 +176,6 @@
False, True)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
- def test_verify_keystone_api_versions_no_v2(self, mock_request):
- self.useFixture(fixtures.MockPatchObject(
- verify_tempest_config, '_get_unversioned_endpoint',
- return_value='http://fake_endpoint:5000'))
- fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
- fake_resp = json.dumps(fake_resp)
- mock_request.return_value = (None, fake_resp)
- fake_os = mock.MagicMock()
- with mock.patch.object(verify_tempest_config,
- 'print_and_or_update') as print_mock:
- verify_tempest_config.verify_keystone_api_versions(fake_os, True)
- print_mock.assert_called_once_with('api_v2',
- 'identity-feature-enabled',
- False, True)
-
- @mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
@@ -234,10 +222,15 @@
print_mock.assert_not_called()
def test_verify_glance_version_no_v2_with_v1_1(self):
- def fake_get_versions():
- return (None, ['v1.1'])
+ # This test verifies that wrong config api_v2 = True is detected
+ class FakeClient(object):
+ def get_versions(self):
+ return (None, ['v1.1'])
+
fake_os = mock.MagicMock()
- fake_os.image_client.get_versions = fake_get_versions
+ fake_module = mock.MagicMock()
+ fake_module.ImagesClient = FakeClient
+ fake_os.image_v1 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
@@ -245,10 +238,15 @@
False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
- def fake_get_versions():
- return (None, ['v1.0'])
+ # This test verifies that wrong config api_v2 = True is detected
+ class FakeClient(object):
+ def get_versions(self):
+ return (None, ['v1.0'])
+
fake_os = mock.MagicMock()
- fake_os.image_client.get_versions = fake_get_versions
+ fake_module = mock.MagicMock()
+ fake_module.ImagesClient = FakeClient
+ fake_os.image_v1 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
@@ -256,24 +254,59 @@
False, True)
def test_verify_glance_version_no_v1(self):
- def fake_get_versions():
- return (None, ['v2.0'])
+ # This test verifies that wrong config api_v1 = True is detected
+ class FakeClient(object):
+ def get_versions(self):
+ raise lib_exc.NotFound()
+
+ def list_versions(self):
+ return {'versions': [{'id': 'v2.0'}]}
+
fake_os = mock.MagicMock()
- fake_os.image_client.get_versions = fake_get_versions
+ fake_module = mock.MagicMock()
+ fake_module.ImagesClient = FakeClient
+ fake_module.VersionsClient = FakeClient
+ fake_os.image_v1 = fake_module
+ fake_os.image_v2 = fake_module
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'image-feature-enabled',
False, True)
+ def test_verify_glance_version_no_version(self):
+ # This test verifies that wrong config glance = True is detected
+ class FakeClient(object):
+ def get_versions(self):
+ raise lib_exc.NotFound()
+
+ def list_versions(self):
+ raise lib_exc.NotFound()
+
+ fake_os = mock.MagicMock()
+ fake_module = mock.MagicMock()
+ fake_module.ImagesClient = FakeClient
+ fake_module.VersionsClient = FakeClient
+ fake_os.image_v1 = fake_module
+ fake_os.image_v2 = fake_module
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('glance',
+ 'service-available',
+ False, True)
+
def test_verify_extensions_neutron(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- fake_os.network_extensions_client.list_extensions = (
- fake_list_extensions)
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
@@ -295,8 +328,11 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- fake_os.network_extensions_client.list_extensions = (
- fake_list_extensions)
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
@@ -313,15 +349,17 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- # NOTE (e0ne): mock both v1 and v2 APIs
- fake_os.volumes_extension_client.list_extensions = fake_list_extensions
- fake_os.volumes_v2_extension_client.list_extensions = (
- fake_list_extensions)
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
+
self.assertIn('cinder', results)
self.assertIn('fake1', results['cinder'])
self.assertTrue(results['cinder']['fake1'])
@@ -338,10 +376,11 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- # NOTE (e0ne): mock both v1 and v2 APIs
- fake_os.volumes_extension_client.list_extensions = fake_list_extensions
- fake_os.volumes_v2_extension_client.list_extensions = (
- fake_list_extensions)
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
@@ -357,7 +396,11 @@
return ([{'alias': 'fake1'}, {'alias': 'fake2'},
{'alias': 'not_fake'}])
fake_os = mock.MagicMock()
- fake_os.extensions_client.list_extensions = fake_list_extensions
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
@@ -379,7 +422,11 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
- fake_os.extensions_client.list_extensions = fake_list_extensions
+ fake_client = mock.MagicMock()
+ fake_client.list_extensions = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
@@ -392,12 +439,16 @@
def test_verify_extensions_swift(self):
def fake_list_extensions():
- return (None, {'fake1': 'metadata',
- 'fake2': 'metadata',
- 'not_fake': 'metadata',
- 'swift': 'metadata'})
+ return {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'}
fake_os = mock.MagicMock()
- fake_os.capabilities_client.list_capabilities = fake_list_extensions
+ fake_client = mock.MagicMock()
+ fake_client.list_capabilities = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
@@ -414,12 +465,16 @@
def test_verify_extensions_swift_all(self):
def fake_list_extensions():
- return (None, {'fake1': 'metadata',
- 'fake2': 'metadata',
- 'not_fake': 'metadata',
- 'swift': 'metadata'})
+ return {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'}
fake_os = mock.MagicMock()
- fake_os.capabilities_client.list_capabilities = fake_list_extensions
+ fake_client = mock.MagicMock()
+ fake_client.list_capabilities = fake_list_extensions
+ self.useFixture(fixtures.MockPatchObject(
+ verify_tempest_config, 'get_extension_client',
+ return_value=fake_client))
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
@@ -429,3 +484,13 @@
self.assertIn('extensions', results['swift'])
self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
sorted(results['swift']['extensions']))
+
+ def test_get_extension_client(self):
+ creds = credentials_factory.get_credentials(
+ fill_in=False, username='fake_user', project_name='fake_project',
+ password='fake_password')
+ os = clients.Manager(creds)
+ for service in ['nova', 'neutron', 'swift', 'cinder']:
+ extensions_client = verify_tempest_config.get_extension_client(
+ os, service)
+ self.assertIsInstance(extensions_client, rest_client.RestClient)
diff --git a/tempest/tests/common/test_admin_available.py b/tempest/tests/common/test_admin_available.py
index c3d248c..7b3b1b0 100644
--- a/tempest/tests/common/test_admin_available.py
+++ b/tempest/tests/common/test_admin_available.py
@@ -53,7 +53,7 @@
'password': 'p',
'types': ['admin']})
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=accounts))
cfg.CONF.set_default('test_accounts_file',
use_accounts_file, group='auth')
diff --git a/tempest/tests/common/test_alt_available.py b/tempest/tests/common/test_alt_available.py
index b9a8967..a425bb8 100644
--- a/tempest/tests/common/test_alt_available.py
+++ b/tempest/tests/common/test_alt_available.py
@@ -40,7 +40,7 @@
project_name="t%s" % ii,
password="p") for ii in creds]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=accounts))
cfg.CONF.set_default('test_accounts_file',
use_accounts_file, group='auth')
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
new file mode 100644
index 0000000..7cf87f8
--- /dev/null
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -0,0 +1,279 @@
+# Copyright 2017 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_config import cfg
+import testtools
+
+from tempest.common import credentials_factory as cf
+from tempest import config
+from tempest.lib.common import dynamic_creds
+from tempest.lib.common import preprov_creds
+from tempest.lib import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+
+
+class TestCredentialsFactory(base.TestCase):
+
+ def setUp(self):
+ super(TestCredentialsFactory, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+
+ def test_get_dynamic_provider_params_creds_v2(self):
+ expected_uri = 'EXPECTED_V2_URI'
+ cfg.CONF.set_default('uri', expected_uri, group='identity')
+ admin_creds = fake_credentials.FakeCredentials()
+ params = cf.get_dynamic_provider_params('v2', admin_creds=admin_creds)
+ expected_params = dict(identity_uri=expected_uri,
+ admin_creds=admin_creds)
+ for key in expected_params:
+ self.assertIn(key, params)
+ self.assertEqual(expected_params[key], params[key])
+
+ def test_get_dynamic_provider_params_creds_v3(self):
+ expected_uri = 'EXPECTED_V3_URI'
+ cfg.CONF.set_default('uri_v3', expected_uri, group='identity')
+ admin_creds = fake_credentials.FakeCredentials()
+ params = cf.get_dynamic_provider_params('v3', admin_creds=admin_creds)
+ expected_params = dict(identity_uri=expected_uri,
+ admin_creds=admin_creds)
+ for key in expected_params:
+ self.assertIn(key, params)
+ self.assertEqual(expected_params[key], params[key])
+
+ def test_get_dynamic_provider_params_creds_vx(self):
+ admin_creds = fake_credentials.FakeCredentials()
+ invalid_version = 'invalid_version_x'
+ with testtools.ExpectedException(
+ exc_type=exceptions.InvalidIdentityVersion,
+ value_re='Invalid version ' + invalid_version):
+ cf.get_dynamic_provider_params(invalid_version,
+ admin_creds=admin_creds)
+
+ def test_get_dynamic_provider_params_no_creds(self):
+ expected_identity_version = 'v3'
+ with mock.patch.object(
+ cf, 'get_configured_admin_credentials') as admin_creds_mock:
+ cf.get_dynamic_provider_params(expected_identity_version)
+ admin_creds_mock.assert_called_once_with(
+ fill_in=True, identity_version=expected_identity_version)
+
+ def test_get_preprov_provider_params_creds_v2(self):
+ expected_uri = 'EXPECTED_V2_URI'
+ cfg.CONF.set_default('uri', expected_uri, group='identity')
+ params = cf.get_preprov_provider_params('v2')
+ self.assertIn('identity_uri', params)
+ self.assertEqual(expected_uri, params['identity_uri'])
+
+ def test_get_preprov_provider_params_creds_v3(self):
+ expected_uri = 'EXPECTED_V3_URI'
+ cfg.CONF.set_default('uri_v3', expected_uri, group='identity')
+ params = cf.get_preprov_provider_params('v3')
+ self.assertIn('identity_uri', params)
+ self.assertEqual(expected_uri, params['identity_uri'])
+
+ def test_get_preprov_provider_params_creds_vx(self):
+ invalid_version = 'invalid_version_x'
+ with testtools.ExpectedException(
+ exc_type=exceptions.InvalidIdentityVersion,
+ value_re='Invalid version ' + invalid_version):
+ cf.get_preprov_provider_params(invalid_version)
+
+ @mock.patch.object(dynamic_creds, 'DynamicCredentialProvider')
+ @mock.patch.object(cf, 'get_dynamic_provider_params')
+ def test_get_credentials_provider_dynamic(
+ self, mock_dynamic_provider_params,
+ mock_dynamic_credentials_provider_class):
+ cfg.CONF.set_default('use_dynamic_credentials', True, group='auth')
+ expected_params = {'foo': 'bar'}
+ mock_dynamic_provider_params.return_value = expected_params
+ expected_name = 'my_name'
+ expected_network_resources = {'network': 'resources'}
+ expected_identity_version = 'identity_version'
+ cf.get_credentials_provider(
+ expected_name,
+ network_resources=expected_network_resources,
+ force_tenant_isolation=False,
+ identity_version=expected_identity_version)
+ mock_dynamic_provider_params.assert_called_once_with(
+ expected_identity_version)
+ mock_dynamic_credentials_provider_class.assert_called_once_with(
+ name=expected_name, network_resources=expected_network_resources,
+ **expected_params)
+
+ @mock.patch.object(preprov_creds, 'PreProvisionedCredentialProvider')
+ @mock.patch.object(cf, 'get_preprov_provider_params')
+ def test_get_credentials_provider_preprov(
+ self, mock_preprov_provider_params,
+ mock_preprov_credentials_provider_class):
+ cfg.CONF.set_default('use_dynamic_credentials', False, group='auth')
+ cfg.CONF.set_default('test_accounts_file', '/some/file', group='auth')
+ expected_params = {'foo': 'bar'}
+ mock_preprov_provider_params.return_value = expected_params
+ expected_name = 'my_name'
+ expected_identity_version = 'identity_version'
+ cf.get_credentials_provider(
+ expected_name,
+ force_tenant_isolation=False,
+ identity_version=expected_identity_version)
+ mock_preprov_provider_params.assert_called_once_with(
+ expected_identity_version)
+ mock_preprov_credentials_provider_class.assert_called_once_with(
+ name=expected_name, **expected_params)
+
+ def test_get_credentials_provider_preprov_no_file(self):
+ cfg.CONF.set_default('use_dynamic_credentials', False, group='auth')
+ cfg.CONF.set_default('test_accounts_file', None, group='auth')
+ with testtools.ExpectedException(
+ exc_type=exceptions.InvalidConfiguration):
+ cf.get_credentials_provider(
+ 'some_name',
+ force_tenant_isolation=False,
+ identity_version='some_version')
+
+ @mock.patch.object(dynamic_creds, 'DynamicCredentialProvider')
+ @mock.patch.object(cf, 'get_dynamic_provider_params')
+ def test_get_credentials_provider_force_dynamic(
+ self, mock_dynamic_provider_params,
+ mock_dynamic_credentials_provider_class):
+ cfg.CONF.set_default('use_dynamic_credentials', False, group='auth')
+ expected_params = {'foo': 'bar'}
+ mock_dynamic_provider_params.return_value = expected_params
+ expected_name = 'my_name'
+ expected_network_resources = {'network': 'resources'}
+ expected_identity_version = 'identity_version'
+ cf.get_credentials_provider(
+ expected_name,
+ network_resources=expected_network_resources,
+ force_tenant_isolation=True,
+ identity_version=expected_identity_version)
+ mock_dynamic_provider_params.assert_called_once_with(
+ expected_identity_version)
+ mock_dynamic_credentials_provider_class.assert_called_once_with(
+ name=expected_name, network_resources=expected_network_resources,
+ **expected_params)
+
+ @mock.patch.object(cf, 'get_credentials')
+ def test_get_configured_admin_credentials(self, mock_get_credentials):
+ cfg.CONF.set_default('auth_version', 'v3', 'identity')
+ all_params = [('admin_username', 'username', 'my_name'),
+ ('admin_password', 'password', 'secret'),
+ ('admin_project_name', 'project_name', 'my_pname'),
+ ('admin_domain_name', 'domain_name', 'my_dname')]
+ expected_result = 'my_admin_credentials'
+ mock_get_credentials.return_value = expected_result
+ for config_item, _, value in all_params:
+ cfg.CONF.set_default(config_item, value, 'auth')
+ # Build the expected params
+ expected_params = dict(
+ [(field, value) for _, field, value in all_params])
+ expected_params.update(config.service_client_config())
+ admin_creds = cf.get_configured_admin_credentials()
+ mock_get_credentials.assert_called_once_with(
+ fill_in=True, identity_version='v3', **expected_params)
+ self.assertEqual(expected_result, admin_creds)
+
+ @mock.patch.object(cf, 'get_credentials')
+ def test_get_configured_admin_credentials_not_fill_valid(
+ self, mock_get_credentials):
+ cfg.CONF.set_default('auth_version', 'v2', 'identity')
+ all_params = [('admin_username', 'username', 'my_name'),
+ ('admin_password', 'password', 'secret'),
+ ('admin_project_name', 'project_name', 'my_pname'),
+ ('admin_domain_name', 'domain_name', 'my_dname')]
+ expected_result = mock.Mock()
+ expected_result.is_valid.return_value = True
+ mock_get_credentials.return_value = expected_result
+ for config_item, _, value in all_params:
+ cfg.CONF.set_default(config_item, value, 'auth')
+ # Build the expected params
+ expected_params = dict(
+ [(field, value) for _, field, value in all_params])
+ expected_params.update(config.service_client_config())
+ admin_creds = cf.get_configured_admin_credentials(
+ fill_in=False, identity_version='v3')
+ mock_get_credentials.assert_called_once_with(
+ fill_in=False, identity_version='v3', **expected_params)
+ self.assertEqual(expected_result, admin_creds)
+ expected_result.is_valid.assert_called_once()
+
+ @mock.patch.object(cf, 'get_credentials')
+ def test_get_configured_admin_credentials_not_fill_not_valid(
+ self, mock_get_credentials):
+ cfg.CONF.set_default('auth_version', 'v2', 'identity')
+ expected_result = mock.Mock()
+ expected_result.is_valid.return_value = False
+ mock_get_credentials.return_value = expected_result
+ with testtools.ExpectedException(exceptions.InvalidConfiguration,
+ value_re='.*\n.*identity version v2'):
+ cf.get_configured_admin_credentials(fill_in=False)
+
+ @mock.patch('tempest.lib.auth.get_credentials')
+ def test_get_credentials_v2(self, mock_auth_get_credentials):
+ expected_uri = 'V2_URI'
+ expected_result = 'my_creds'
+ mock_auth_get_credentials.return_value = expected_result
+ cfg.CONF.set_default('uri', expected_uri, 'identity')
+ params = {'foo': 'bar'}
+ expected_params = params.copy()
+ expected_params.update(config.service_client_config())
+ result = cf.get_credentials(identity_version='v2', **params)
+ self.assertEqual(expected_result, result)
+ mock_auth_get_credentials.assert_called_once_with(
+ expected_uri, fill_in=True, identity_version='v2',
+ **expected_params)
+
+ @mock.patch('tempest.lib.auth.get_credentials')
+ def test_get_credentials_v3_no_domain(self, mock_auth_get_credentials):
+ expected_uri = 'V3_URI'
+ expected_result = 'my_creds'
+ expected_domain = 'my_domain'
+ mock_auth_get_credentials.return_value = expected_result
+ cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
+ cfg.CONF.set_default('default_credentials_domain_name',
+ expected_domain, 'auth')
+ params = {'foo': 'bar'}
+ expected_params = params.copy()
+ expected_params['domain_name'] = expected_domain
+ expected_params.update(config.service_client_config())
+ result = cf.get_credentials(fill_in=False, identity_version='v3',
+ **params)
+ self.assertEqual(expected_result, result)
+ mock_auth_get_credentials.assert_called_once_with(
+ expected_uri, fill_in=False, identity_version='v3',
+ **expected_params)
+
+ @mock.patch('tempest.lib.auth.get_credentials')
+ def test_get_credentials_v3_domain(self, mock_auth_get_credentials):
+ expected_uri = 'V3_URI'
+ expected_result = 'my_creds'
+ expected_domain = 'my_domain'
+ mock_auth_get_credentials.return_value = expected_result
+ cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
+ cfg.CONF.set_default('default_credentials_domain_name',
+ expected_domain, 'auth')
+ params = {'foo': 'bar', 'user_domain_name': expected_domain}
+ expected_params = params.copy()
+ expected_params.update(config.service_client_config())
+ result = cf.get_credentials(fill_in=False, identity_version='v3',
+ **params)
+ self.assertEqual(expected_result, result)
+ mock_auth_get_credentials.assert_called_once_with(
+ expected_uri, fill_in=False, identity_version='v3',
+ **expected_params)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index c2f622c..bc197b5 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -59,6 +59,7 @@
# Tests that the wait method raises VolumeRestoreErrorException if
# the volume status is 'error_restoring'.
client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
build_interval=1)
volume1 = {'volume': {'status': 'restoring-backup'}}
volume2 = {'volume': {'status': 'error_restoring'}}
diff --git a/tempest/tests/lib/cli/test_execute.py b/tempest/tests/lib/cli/test_execute.py
index 0130454..c276386 100644
--- a/tempest/tests/lib/cli/test_execute.py
+++ b/tempest/tests/lib/cli/test_execute.py
@@ -91,3 +91,37 @@
self.assertEqual(mock_execute.call_count, 1)
self.assertEqual(mock_execute.call_args[1],
{'prefix': 'env LAC_ALL=C'})
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_domain_name(self, mock_execute):
+ cli = cli_base.CLIClient(
+ user_domain_name='default',
+ project_domain_name='default'
+ )
+ cli.glance('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-user-domain-name default',
+ mock_execute.call_args[0][2])
+ self.assertIn('--os-project-domain-name default',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-user-domain-id',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-project-domain-id',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_domain_id(self, mock_execute):
+ cli = cli_base.CLIClient(
+ user_domain_id='default',
+ project_domain_id='default'
+ )
+ cli.glance('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-user-domain-id default',
+ mock_execute.call_args[0][2])
+ self.assertIn('--os-project-domain-id default',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-user-domain-name',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-project-domain-name',
+ mock_execute.call_args[0][2])
diff --git a/tempest/tests/lib/common/test_api_version_utils.py b/tempest/tests/lib/common/test_api_version_utils.py
index 6206379..b99e8d4 100644
--- a/tempest/tests/lib/common/test_api_version_utils.py
+++ b/tempest/tests/lib/common/test_api_version_utils.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
import testtools
from tempest.lib.common import api_version_utils
@@ -30,7 +31,7 @@
cfg_max_version)
except testtools.TestCase.skipException as e:
if not expected_skip:
- raise testtools.TestCase.failureException(e.message)
+ raise testtools.TestCase.failureException(six.text_type(e))
def test_version_min_in_range(self):
self._test_version('2.2', '2.10', '2.1', '2.7')
@@ -91,24 +92,106 @@
def test_header_matches(self):
microversion_header_name = 'x-openstack-xyz-api-version'
request_microversion = '2.1'
- test_respose = {microversion_header_name: request_microversion}
+ test_response = {microversion_header_name: request_microversion}
api_version_utils.assert_version_header_matches_request(
- microversion_header_name, request_microversion, test_respose)
+ microversion_header_name, request_microversion, test_response)
def test_header_does_not_match(self):
microversion_header_name = 'x-openstack-xyz-api-version'
request_microversion = '2.1'
- test_respose = {microversion_header_name: '2.2'}
+ test_response = {microversion_header_name: '2.2'}
self.assertRaises(
exceptions.InvalidHTTPResponseHeader,
api_version_utils.assert_version_header_matches_request,
- microversion_header_name, request_microversion, test_respose)
+ microversion_header_name, request_microversion, test_response)
def test_header_not_present(self):
microversion_header_name = 'x-openstack-xyz-api-version'
request_microversion = '2.1'
- test_respose = {}
+ test_response = {}
self.assertRaises(
exceptions.InvalidHTTPResponseHeader,
api_version_utils.assert_version_header_matches_request,
- microversion_header_name, request_microversion, test_respose)
+ microversion_header_name, request_microversion, test_response)
+
+ def test_compare_versions_less_than(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.2'
+ test_response = {microversion_header_name: '2.1'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "lt"))
+
+ def test_compare_versions_less_than_equal(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.2'
+ test_response = {microversion_header_name: '2.1'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "le"))
+
+ def test_compare_versions_greater_than_equal(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: '2.2'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "ge"))
+
+ def test_compare_versions_greater_than(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: '2.2'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "gt"))
+
+ def test_compare_versions_equal(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.11'
+ test_response = {microversion_header_name: '2.1'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "eq"))
+
+ def test_compare_versions_not_equal(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: '2.1'}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "ne"))
+
+ def test_compare_versions_with_name_in_microversion(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = 'volume 3.1'
+ test_response = {microversion_header_name: 'volume 3.1'}
+ self.assertTrue(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "eq"))
+
+ def test_compare_versions_invalid_operation(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: '2.1'}
+ self.assertRaises(
+ exceptions.InvalidParam,
+ api_version_utils.compare_version_header_to_response,
+ microversion_header_name, request_microversion, test_response,
+ "foo")
+
+ def test_compare_versions_header_not_present(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {}
+ self.assertFalse(
+ api_version_utils.compare_version_header_to_response(
+ microversion_header_name, request_microversion, test_response,
+ "eq"))
diff --git a/tempest/tests/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
similarity index 98%
rename from tempest/tests/common/test_dynamic_creds.py
rename to tempest/tests/lib/common/test_dynamic_creds.py
index c739619..ebcf5d1 100644
--- a/tempest/tests/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -17,8 +17,8 @@
from oslo_config import cfg
from tempest.common import credentials_factory as credentials
-from tempest.common import dynamic_creds
from tempest import config
+from tempest.lib.common import dynamic_creds
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.identity.v2 import identity_client as v2_iden_client
@@ -40,13 +40,15 @@
from tempest.tests import fake_config
from tempest.tests.lib import fake_http
from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
class TestDynamicCredentialProvider(base.TestCase):
fixed_params = {'name': 'test class',
'identity_version': 'v2',
- 'admin_role': 'admin'}
+ 'admin_role': 'admin',
+ 'identity_uri': 'fake_uri'}
token_client = v2_token_client
iden_client = v2_iden_client
@@ -61,6 +63,7 @@
def setUp(self):
super(TestDynamicCredentialProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
+ self.useFixture(registry_fixture.RegistryFixture())
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.patchobject(self.token_client_class, 'raw_request',
@@ -619,7 +622,8 @@
fixed_params = {'name': 'test class',
'identity_version': 'v3',
- 'admin_role': 'admin'}
+ 'admin_role': 'admin',
+ 'identity_uri': 'fake_uri'}
token_client = v3_token_client
iden_client = v3_iden_client
@@ -657,7 +661,7 @@
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
creds.creds_client = mock.MagicMock()
creds.creds_client.create_user_role.side_effect = lib_exc.Conflict
- with mock.patch('tempest.common.dynamic_creds.LOG') as log_mock:
+ with mock.patch('tempest.lib.common.dynamic_creds.LOG') as log_mock:
creds._create_creds()
log_mock.warning.assert_called_once_with(
"Member role already exists, ignoring conflict.")
diff --git a/tempest/tests/lib/common/test_http.py b/tempest/tests/lib/common/test_http.py
new file mode 100644
index 0000000..a292209
--- /dev/null
+++ b/tempest/tests/lib/common/test_http.py
@@ -0,0 +1,68 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common import http
+from tempest.tests import base
+
+
+class TestClosingHttp(base.TestCase):
+ def setUp(self):
+ super(TestClosingHttp, self).setUp()
+ self.cert_none = "CERT_NONE"
+ self.cert_location = "/etc/ssl/certs/ca-certificates.crt"
+
+ def test_constructor_invalid_ca_certs_and_timeout(self):
+ connection = http.ClosingHttp(
+ disable_ssl_certificate_validation=False,
+ ca_certs=None,
+ timeout=None)
+ for attr in ('cert_reqs', 'ca_certs', 'timeout'):
+ self.assertNotIn(attr, connection.connection_pool_kw)
+
+ def test_constructor_valid_ca_certs(self):
+ cert_required = 'CERT_REQUIRED'
+ connection = http.ClosingHttp(
+ disable_ssl_certificate_validation=False,
+ ca_certs=self.cert_location,
+ timeout=None)
+ self.assertEqual(cert_required,
+ connection.connection_pool_kw['cert_reqs'])
+ self.assertEqual(self.cert_location,
+ connection.connection_pool_kw['ca_certs'])
+ self.assertNotIn('timeout',
+ connection.connection_pool_kw)
+
+ def test_constructor_ssl_cert_validation_disabled(self):
+ connection = http.ClosingHttp(
+ disable_ssl_certificate_validation=True,
+ ca_certs=None,
+ timeout=30)
+ self.assertEqual(self.cert_none,
+ connection.connection_pool_kw['cert_reqs'])
+ self.assertEqual(30,
+ connection.connection_pool_kw['timeout'])
+ self.assertNotIn('ca_certs',
+ connection.connection_pool_kw)
+
+ def test_constructor_ssl_cert_validation_disabled_and_ca_certs(self):
+ connection = http.ClosingHttp(
+ disable_ssl_certificate_validation=True,
+ ca_certs=self.cert_location,
+ timeout=None)
+ self.assertNotIn('timeout',
+ connection.connection_pool_kw)
+ self.assertEqual(self.cert_none,
+ connection.connection_pool_kw['cert_reqs'])
+ self.assertNotIn('ca_certs',
+ connection.connection_pool_kw)
diff --git a/tempest/tests/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
similarity index 96%
rename from tempest/tests/common/test_preprov_creds.py
rename to tempest/tests/lib/common/test_preprov_creds.py
index 414b106..9b10159 100644
--- a/tempest/tests/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -24,20 +24,22 @@
from oslo_concurrency.fixture import lockutils as lockutils_fixtures
from oslo_config import cfg
-from tempest.common import preprov_creds
from tempest import config
from tempest.lib import auth
from tempest.lib.common import cred_provider
+from tempest.lib.common import preprov_creds
from tempest.lib import exceptions as lib_exc
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
class TestPreProvisionedCredentials(base.TestCase):
fixed_params = {'name': 'test class',
'identity_version': 'v2',
+ 'identity_uri': 'fake_uri',
'test_accounts_file': 'fake_accounts_file',
'accounts_lock_dir': 'fake_locks_dir',
'admin_role': 'admin',
@@ -87,10 +89,12 @@
self.useFixture(lockutils_fixtures.ExternalLockFixture())
self.test_accounts = self._fake_accounts(cfg.CONF.identity.admin_role)
self.accounts_mock = self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=self.test_accounts))
self.useFixture(fixtures.MockPatch(
'os.path.isfile', return_value=True))
+ # Make sure we leave the registry clean
+ self.useFixture(registry_fixture.RegistryFixture())
def tearDown(self):
super(TestPreProvisionedCredentials, self).tearDown()
@@ -267,7 +271,7 @@
def test_is_not_multi_user(self):
self.test_accounts = [self.test_accounts[0]]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=self.test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -331,7 +335,7 @@
'password': 'p', 'roles': ['role-7', 'role-11'],
'resources': {'network': 'network-2'}}]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -359,7 +363,7 @@
admin_accounts = [x for x in self.test_accounts if 'test_admin'
in x['username']]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=admin_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -377,7 +381,7 @@
admin_accounts = [x for x in self.test_accounts if 'test_admin'
in x['username']]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=admin_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -398,7 +402,7 @@
{'username': 'test_admin1', 'tenant_name': 'test_tenant11',
'password': 'p', 'types': ['admin']}]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -412,7 +416,7 @@
{'username': 'test_admin1', 'tenant_name': 'test_tenant11',
'password': 'p', 'roles': [cfg.CONF.identity.admin_role]}]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -423,7 +427,7 @@
non_admin_accounts = [x for x in self.test_accounts if 'test_admin'
not in x['username']]
self.useFixture(fixtures.MockPatch(
- 'tempest.common.preprov_creds.read_accounts_yaml',
+ 'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=non_admin_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
@@ -436,6 +440,7 @@
fixed_params = {'name': 'test class',
'identity_version': 'v3',
+ 'identity_uri': 'fake_uri',
'test_accounts_file': 'fake_accounts_file',
'accounts_lock_dir': 'fake_locks_dir_v3',
'admin_role': 'admin',
diff --git a/tempest/tests/lib/common/test_validation_resources.py b/tempest/tests/lib/common/test_validation_resources.py
new file mode 100644
index 0000000..d5139f4
--- /dev/null
+++ b/tempest/tests/lib/common/test_validation_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2017 IBM Corp.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+import mock
+import testtools
+
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services import clients
+from tempest.tests import base
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+FAKE_SECURITY_GROUP = {'security_group': {'id': 'sg_id'}}
+FAKE_KEYPAIR = {'keypair': {'name': 'keypair_name'}}
+FAKE_FIP_NOVA_NET = {'floating_ip': {'ip': '1.2.3.4', 'id': '1234'}}
+FAKE_FIP_NEUTRON = {'floatingip': {'floating_ip_address': '1.2.3.4',
+ 'id': '1234'}}
+
+SERVICES = 'tempest.lib.services'
+SG_CLIENT = (SERVICES + '.%s.security_groups_client.SecurityGroupsClient.%s')
+SGR_CLIENT = (SERVICES + '.%s.security_group_rules_client.'
+ 'SecurityGroupRulesClient.create_security_group_rule')
+KP_CLIENT = (SERVICES + '.compute.keypairs_client.KeyPairsClient.%s')
+FIP_CLIENT = (SERVICES + '.%s.floating_ips_client.FloatingIPsClient.%s')
+
+
+class TestValidationResources(base.TestCase):
+
+ def setUp(self):
+ super(TestValidationResources, self).setUp()
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'create_security_group'), autospec=True,
+ return_value=FAKE_SECURITY_GROUP))
+ self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'create_security_group'), autospec=True,
+ return_value=FAKE_SECURITY_GROUP))
+ self.mock_sgr_compute = self.useFixture(fixtures.MockPatch(
+ SGR_CLIENT % 'compute', autospec=True))
+ self.mock_sgr_network = self.useFixture(fixtures.MockPatch(
+ SGR_CLIENT % 'network', autospec=True))
+ self.mock_kp = self.useFixture(fixtures.MockPatch(
+ KP_CLIENT % 'create_keypair', autospec=True,
+ return_value=FAKE_KEYPAIR))
+ self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('compute', 'create_floating_ip'), autospec=True,
+ return_value=FAKE_FIP_NOVA_NET))
+ self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('network', 'create_floatingip'), autospec=True,
+ return_value=FAKE_FIP_NEUTRON))
+ self.os = clients.ServiceClients(
+ fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+ def test_create_ssh_security_group_nova_net(self):
+ expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+ sg = vr.create_ssh_security_group(self.os, add_rule=True,
+ use_neutron=False)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # Neutron clients have not been used
+ self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+ # Nova-net clients assertions
+ self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_sgr_compute.mock.call_count, 0)
+ for call in self.mock_sgr_compute.mock.call_args_list[1:]:
+ self.assertIn(expected_sg_id, call[1].values())
+
+ def test_create_ssh_security_group_neutron(self):
+ expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+ expected_ethertype = 'fake_ethertype'
+ sg = vr.create_ssh_security_group(self.os, add_rule=True,
+ use_neutron=True,
+ ethertype=expected_ethertype)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # Nova-net clients have not been used
+ self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+        # Neutron clients assertions
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_sgr_network.mock.call_count, 0)
+ # Check SG ID and ethertype are passed down to rules
+ for call in self.mock_sgr_network.mock.call_args_list[1:]:
+ self.assertIn(expected_sg_id, call[1].values())
+ self.assertIn(expected_ethertype, call[1].values())
+
+ def test_create_ssh_security_no_rules(self):
+ sg = vr.create_ssh_security_group(self.os, add_rule=False)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # SG Rules clients have not been used
+ self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+
+ @mock.patch.object(vr, 'create_ssh_security_group',
+ return_value=FAKE_SECURITY_GROUP['security_group'])
+ def test_create_validation_resources_nova_net(self, mock_create_sg):
+ expected_floating_network_id = 'my_fni'
+ expected_floating_network_name = 'my_fnn'
+ resources = vr.create_validation_resources(
+ self.os, keypair=True, floating_ip=True, security_group=True,
+ security_group_rules=True, ethertype='IPv6', use_neutron=False,
+ floating_network_id=expected_floating_network_id,
+ floating_network_name=expected_floating_network_name)
+ # Keypair calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ # Floating IP calls
+ self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+ for call in self.mock_fip_compute.mock.call_args_list[1:]:
+ self.assertIn(expected_floating_network_name, call[1].values())
+ self.assertNotIn(expected_floating_network_id, call[1].values())
+ self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+ # SG calls
+ mock_create_sg.assert_called_once()
+ # Resources
+ for resource in ['keypair', 'floating_ip', 'security_group']:
+ self.assertIn(resource, resources)
+ self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+ resources['security_group'])
+ self.assertEqual(FAKE_FIP_NOVA_NET['floating_ip'],
+ resources['floating_ip'])
+
+ @mock.patch.object(vr, 'create_ssh_security_group',
+ return_value=FAKE_SECURITY_GROUP['security_group'])
+ def test_create_validation_resources_neutron(self, mock_create_sg):
+ expected_floating_network_id = 'my_fni'
+ expected_floating_network_name = 'my_fnn'
+ resources = vr.create_validation_resources(
+ self.os, keypair=True, floating_ip=True, security_group=True,
+ security_group_rules=True, ethertype='IPv6', use_neutron=True,
+ floating_network_id=expected_floating_network_id,
+ floating_network_name=expected_floating_network_name)
+ # Keypair calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ # Floating IP calls
+ self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+        for call in self.mock_fip_network.mock.call_args_list[1:]:
+ self.assertIn(expected_floating_network_id, call[1].values())
+ self.assertNotIn(expected_floating_network_name, call[1].values())
+ # SG calls
+ mock_create_sg.assert_called_once()
+ # Resources
+ for resource in ['keypair', 'floating_ip', 'security_group']:
+ self.assertIn(resource, resources)
+ self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+ resources['security_group'])
+ self.assertIn('ip', resources['floating_ip'])
+ self.assertEqual(resources['floating_ip']['ip'],
+ FAKE_FIP_NEUTRON['floatingip']['floating_ip_address'])
+ self.assertEqual(resources['floating_ip']['id'],
+ FAKE_FIP_NEUTRON['floatingip']['id'])
+
+
+class TestClearValidationResourcesFixture(base.TestCase):
+
+ def setUp(self):
+ super(TestClearValidationResourcesFixture, self).setUp()
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'delete_security_group'), autospec=True))
+ self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'delete_security_group'), autospec=True))
+ self.mock_sg_wait_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'wait_for_resource_deletion'),
+ autospec=True))
+ self.mock_sg_wait_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'wait_for_resource_deletion'),
+ autospec=True))
+ self.mock_kp = self.useFixture(fixtures.MockPatch(
+ KP_CLIENT % 'delete_keypair', autospec=True))
+ self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('compute', 'delete_floating_ip'), autospec=True))
+ self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('network', 'delete_floatingip'), autospec=True))
+ self.os = clients.ServiceClients(
+ fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+ def test_clear_validation_resources_nova_net(self):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NOVA_NET['floating_ip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=False)
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ for call in self.mock_kp.mock.call_args_list[1:]:
+ self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+ self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+ for call in self.mock_sg_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertGreater(self.mock_sg_wait_compute.mock.call_count, 0)
+ for call in self.mock_sg_wait_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+ for call in self.mock_fip_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_FIP_NOVA_NET['floating_ip']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_neutron(self):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ for call in self.mock_kp.mock.call_args_list[1:]:
+ self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ for call in self.mock_sg_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+ for call in self.mock_sg_wait_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+ for call in self.mock_fip_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_FIP_NEUTRON['floatingip']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+
+ def test_clear_validation_resources_exceptions(self):
+        # Test that, even with exceptions, all cleanups are invoked and that
+        # only the first exception is reported.
+        # NOTE(andreaf) There's no way of knowing which exception is going to
+        # be raised first unless we enforce which resource is cleared first,
+        # which is not really interesting, but also not harmful: the keypair
+        # is cleared first.
+ self.mock_kp.mock.side_effect = Exception('keypair exception')
+ self.mock_sg_network.mock.side_effect = Exception('sg exception')
+ self.mock_fip_network.mock.side_effect = Exception('fip exception')
+ with testtools.ExpectedException(Exception, value_re='keypair'):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+        # Client delete calls are still made, but not the wait calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_wait_not_found_wait(self):
+ # Test that a not found on wait is not an exception
+ self.mock_sg_wait_network.mock.side_effect = lib_exc.NotFound('yay')
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+        # Client delete and wait calls are still made; NotFound on wait is
+        # simply ignored
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_wait_not_found_delete(self):
+ # Test that a not found on delete is not an exception
+ self.mock_kp.mock.side_effect = lib_exc.NotFound('yay')
+ self.mock_sg_network.mock.side_effect = lib_exc.NotFound('yay')
+ self.mock_fip_network.mock.side_effect = lib_exc.NotFound('yay')
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+        # Client delete calls are still made, but the wait call is skipped
+        # since the delete already raised NotFound
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+
+class TestValidationResourcesFixture(base.TestCase):
+
+ @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+ def test_use_fixture(self, mock_vr):
+ exp_vr = dict(keypair='keypair',
+ floating_ip='floating_ip',
+ security_group='security_group')
+ mock_vr.return_value = exp_vr
+ exp_clients = 'clients'
+ exp_parameters = dict(keypair=True, floating_ip=True,
+ security_group=True, security_group_rules=True,
+ ethertype='v6', use_neutron=True,
+ floating_network_id='fnid',
+ floating_network_name='fnname')
+ # First mock cleanup
+ self.useFixture(fixtures.MockPatchObject(
+ vr, 'clear_validation_resources', autospec=True))
+        # And then use the vr fixture, so that when the fixture is cleaned
+        # up the mock is still in place
+ vr_fixture = self.useFixture(vr.ValidationResourcesFixture(
+ exp_clients, **exp_parameters))
+ # Assert vr have been provisioned
+ mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+        # Assert vr have been set up in the fixture
+ self.assertEqual(exp_vr, vr_fixture.resources)
+
+ @mock.patch.object(vr, 'clear_validation_resources', autospec=True)
+ @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+ def test_use_fixture_context(self, mock_vr, mock_clear):
+ exp_vr = dict(keypair='keypair',
+ floating_ip='floating_ip',
+ security_group='security_group')
+ mock_vr.return_value = exp_vr
+ exp_clients = 'clients'
+ exp_parameters = dict(keypair=True, floating_ip=True,
+ security_group=True, security_group_rules=True,
+ ethertype='v6', use_neutron=True,
+ floating_network_id='fnid',
+ floating_network_name='fnname')
+ with vr.ValidationResourcesFixture(exp_clients,
+ **exp_parameters) as vr_fixture:
+ # Assert vr have been provisioned
+ mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+            # Assert vr have been set up in the fixture
+ self.assertEqual(exp_vr, vr_fixture.resources)
+ # After context manager is closed, clear is invoked
+ exp_vr['use_neutron'] = exp_parameters['use_neutron']
+ mock_clear.assert_called_once_with(exp_clients, **exp_vr)
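
For context, a minimal usage sketch of the fixture exercised by the two tests
above; `clients` is assumed to be an already-built ServiceClients instance and
every parameter value is illustrative:

    from tempest.lib.common import validation_resources as vr

    # Provision an SSH keypair, a security group (plus rules) and a floating
    # IP for the lifetime of the fixture; network id/name are placeholders.
    with vr.ValidationResourcesFixture(
            clients, keypair=True, floating_ip=True, security_group=True,
            security_group_rules=True, ethertype='IPv4', use_neutron=True,
            floating_network_id='fake-net-id',
            floating_network_name='fake-net-name') as fixture:
        resources = fixture.resources
        # resources holds the provisioned 'keypair', 'floating_ip' and
        # 'security_group' entries; they are cleared when the context exits
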
diff --git a/tempest/tests/lib/common/utils/linux/test_remote_client.py b/tempest/tests/lib/common/utils/linux/test_remote_client.py
index cf312f4..7a21a5f 100644
--- a/tempest/tests/lib/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/lib/common/utils/linux/test_remote_client.py
@@ -34,7 +34,7 @@
client = remote_client.RemoteClient('192.168.1.10', 'username')
client.exec_command('ls')
mock_ssh_exec_command.assert_called_once_with(
- 'set -eu -o pipefail; PATH=$$PATH:/sbin; ls')
+ 'set -eu -o pipefail; PATH=$PATH:/sbin; ls')
@mock.patch.object(ssh.Client, 'test_connection_auth')
def test_validate_authentication(self, mock_test_connection_auth):
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index 8bdf70e..b8385b2 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
-
from tempest.lib.common.utils import data_utils
from tempest.tests import base
@@ -81,7 +79,11 @@
self.assertEqual(len(actual), 3)
self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
actual2 = data_utils.rand_password(2)
- self.assertNotEqual(actual, actual2)
+        # NOTE(masayukig): Originally, we checked that actual and actual2
+        # differ from each other. However, since only 3 characters are
+        # generated, the two values can be identical in a very rare case.
+        # So we just check the length here as well, just in case.
+ self.assertEqual(len(actual2), 3)
def test_rand_url(self):
actual = data_utils.rand_url()
@@ -137,43 +139,6 @@
actual = data_utils.random_bytes(size=2048)
self.assertEqual(2048, len(actual))
- def test_get_ipv6_addr_by_EUI64(self):
- actual = data_utils.get_ipv6_addr_by_EUI64('2001:db8::',
- '00:16:3e:33:44:55')
- self.assertIsInstance(actual, netaddr.IPAddress)
- self.assertEqual(actual,
- netaddr.IPAddress('2001:db8::216:3eff:fe33:4455'))
-
- def test_get_ipv6_addr_by_EUI64_with_IPv4_prefix(self):
- ipv4_prefix = '10.0.8'
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- ipv4_prefix, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_cidr_type(self):
- bad_cidr = 123
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- bad_cidr, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_cidr_value(self):
- bad_cidr = 'bb'
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- bad_cidr, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_mac_value(self):
- cidr = '2001:db8::'
- bad_mac = '00:16:3e:33:44:5Z'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- cidr, bad_mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_mac_type(self):
- cidr = '2001:db8::'
- bad_mac = 99999999999999999999
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- cidr, bad_mac)
-
def test_chunkify(self):
data = "aaa"
chunks = data_utils.chunkify(data, 2)
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index 29c5684..f638ba6 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -81,11 +81,13 @@
@mock.patch('time.sleep')
@mock.patch('time.time')
def test_call_until_true_when_f_never_returns_true(self, m_time, m_sleep):
+ def set_value(bool_value):
+ return bool_value
timeout = 42 # The value doesn't matter as we mock time.time()
sleep = 60 # The value doesn't matter as we mock time.sleep()
m_time.side_effect = utils.generate_timeout_series(timeout)
self.assertEqual(
- False, test_utils.call_until_true(lambda: False, timeout, sleep)
+ False, test_utils.call_until_true(set_value, timeout, sleep, False)
)
m_sleep.call_args_list = [mock.call(sleep)] * 2
m_time.call_args_list = [mock.call()] * 2
@@ -93,11 +95,30 @@
@mock.patch('time.sleep')
@mock.patch('time.time')
def test_call_until_true_when_f_returns_true(self, m_time, m_sleep):
+ def set_value(bool_value=False):
+ return bool_value
timeout = 42 # The value doesn't matter as we mock time.time()
sleep = 60 # The value doesn't matter as we mock time.sleep()
m_time.return_value = 0
self.assertEqual(
- True, test_utils.call_until_true(lambda: True, timeout, sleep)
+ True, test_utils.call_until_true(set_value, timeout, sleep,
+ bool_value=True)
)
self.assertEqual(0, m_sleep.call_count)
- self.assertEqual(1, m_time.call_count)
+        # When logging the elapsed time, the current time has to be acquired
+        # a second time.
+ self.assertEqual(2, m_time.call_count)
+
+ @mock.patch('time.sleep')
+ @mock.patch('time.time')
+ def test_call_until_true_when_f_returns_true_no_param(
+ self, m_time, m_sleep):
+ def set_value(bool_value=False):
+ return bool_value
+ timeout = 42 # The value doesn't matter as we mock time.time()
+ sleep = 60 # The value doesn't matter as we mock time.sleep()
+ m_time.side_effect = utils.generate_timeout_series(timeout)
+ self.assertEqual(
+ False, test_utils.call_until_true(set_value, timeout, sleep)
+ )
+ m_sleep.call_args_list = [mock.call(sleep)] * 2
+ m_time.call_args_list = [mock.call()] * 2
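
The new tests above cover call_until_true forwarding extra positional and
keyword arguments to the predicate, so callers no longer need a lambda
wrapper. A sketch of the behaviour, with an illustrative predicate:

    from tempest.lib.common.utils import test_utils

    def is_ready(flag, strict=False):
        # illustrative predicate; a real caller would poll some resource state
        return flag and not strict

    # Arguments after (func, duration, sleep_for) are passed through to the
    # predicate on every attempt until it returns True or the duration expires.
    result = test_utils.call_until_true(is_ready, 10, 1, True, strict=False)
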
diff --git a/tempest/tests/lib/services/base.py b/tempest/tests/lib/services/base.py
index 778c966..924f9f2 100644
--- a/tempest/tests/lib/services/base.py
+++ b/tempest/tests/lib/services/base.py
@@ -32,6 +32,7 @@
def check_service_client_function(self, function, function2mock,
body, to_utf=False, status=200,
headers=None, mock_args=None,
+ resp_as_string=False,
**kwargs):
"""Mock a service client function for unit testing.
@@ -53,6 +54,9 @@
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
+        :param resp_as_string: Whether the response body is returned as a
+            string. This is for service client methods which return a
+            ResponseBodyData object.
:param kwargs: kwargs that are passed to function.
"""
mocked_response = self.create_response(body, to_utf, status, headers)
@@ -62,8 +66,9 @@
resp = function(**kwargs)
else:
resp = function()
+ if resp_as_string:
+ resp = resp.data
self.assertEqual(body, resp)
-
if isinstance(mock_args, list):
fixture.mock.assert_called_once_with(*mock_args)
elif isinstance(mock_args, dict):
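
For reference, a minimal sketch of how a service client unit test drives this
helper; the client method, response body and kwargs below are hypothetical,
and resp_as_string is only needed for client methods returning a
ResponseBodyData object:

    self.check_service_client_function(
        self.client.show_widget,                          # hypothetical method under test
        'tempest.lib.common.rest_client.RestClient.get',  # RestClient method to mock
        {'widget': {'id': '1'}},                          # body the mock returns and the expected result
        bytes_body=False,                                 # run again with bytes_body=True for coverage
        status=200,
        widget_id='1')                                    # kwargs forwarded to show_widget
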
diff --git a/tempest/tests/lib/services/compute/test_services_client.py b/tempest/tests/lib/services/compute/test_services_client.py
index 41da39c..2dd981c 100644
--- a/tempest/tests/lib/services/compute/test_services_client.py
+++ b/tempest/tests/lib/services/compute/test_services_client.py
@@ -14,6 +14,9 @@
import copy
+import mock
+
+from tempest.lib.services.compute import base_compute_client
from tempest.lib.services.compute import services_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -44,11 +47,21 @@
}
}
+ FAKE_UPDATE_FORCED_DOWN = {
+ "service":
+ {
+ "forced_down": True,
+ "binary": "nova-conductor",
+ "host": "controller"
+ }
+ }
+
def setUp(self):
super(TestServicesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = services_client.ServicesClient(
fake_auth, 'compute', 'regionOne')
+ self.addCleanup(mock.patch.stopall)
def test_list_services_with_str_body(self):
self.check_service_client_function(
@@ -68,7 +81,7 @@
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_SERVICE,
bytes_body,
- host_name="nova-conductor", binary="controller")
+ host="nova-conductor", binary="controller")
def test_enable_service_with_str_body(self):
self._test_enable_service()
@@ -85,10 +98,49 @@
'tempest.lib.common.rest_client.RestClient.put',
fake_service,
bytes_body,
- host_name="nova-conductor", binary="controller")
+ host="nova-conductor", binary="controller")
def test_disable_service_with_str_body(self):
self._test_disable_service()
def test_disable_service_with_bytes_body(self):
self._test_disable_service(bytes_body=True)
+
+ def _test_log_reason_disabled_service(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_SERVICE)
+ resp_body['service']['disabled_reason'] = 'test reason'
+
+ self.check_service_client_function(
+ self.client.disable_log_reason,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ host="nova-conductor",
+ binary="controller",
+ disabled_reason='test reason')
+
+ def test_log_reason_disabled_service_with_str_body(self):
+ self._test_log_reason_disabled_service()
+
+ def test_log_reason_disabled_service_with_bytes_body(self):
+ self._test_log_reason_disabled_service(bytes_body=True)
+
+ def _test_update_forced_down(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_forced_down,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_FORCED_DOWN,
+ bytes_body,
+ host="nova-conductor",
+ binary="controller",
+ forced_down=True)
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.11'))
+ def test_update_forced_down_with_str_body(self, _):
+ self._test_update_forced_down()
+
+ @mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
+ new_callable=mock.PropertyMock(return_value='2.11'))
+ def test_update_forced_down_with_bytes_body(self, _):
+ self._test_update_forced_down(bytes_body=True)
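
The two forced_down tests pin the compute microversion because the services
API only accepts the forced_down field from a sufficiently recent
microversion. An equivalent, more explicit way to pin it for a single call (a
sketch, assuming the same module-level attribute):

    # Pin COMPUTE_MICROVERSION for the duration of the call only; 2.11 is
    # assumed to be the minimum version supporting forced_down.
    with mock.patch.object(base_compute_client, 'COMPUTE_MICROVERSION',
                           new='2.11'):
        self._test_update_forced_down()
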
diff --git a/tempest/tests/lib/services/identity/v3/test_catalog_client.py b/tempest/tests/lib/services/identity/v3/test_catalog_client.py
new file mode 100644
index 0000000..0ac8fe4
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_catalog_client.py
@@ -0,0 +1,86 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import catalog_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestCatalogClient(base.BaseServiceTest):
+ FAKE_CATALOG_INFO = {
+ 'catalog': [
+ {
+ 'endpoints': [
+ {
+ 'id': '39dc322ce86c4111b4f06c2eeae0841b',
+ 'interface': 'public',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:5000'
+ },
+ ],
+ 'id': 'ac58672276f848a7b1727850b3ebe826',
+ 'type': 'compute',
+ 'name': 'nova'
+ },
+ {
+ 'endpoints': [
+ {
+ 'id': '39dc322ce86c4111b4f06c2eeae0841b',
+ 'interface': 'public',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:5000'
+ },
+ ],
+ 'id': 'b7c5ed2b486a46dbb4c221499d22991c',
+ 'type': 'image',
+ 'name': 'glance'
+ },
+ {
+ 'endpoints': [
+ {
+ 'id': '39dc322ce86c4111b4f06c2eeae0841b',
+ 'interface': 'public',
+ 'region': 'RegionOne',
+ 'url': 'http://localhost:5000'
+ },
+ ],
+ 'id': '4363ae44bdf34a3981fde3b823cb9aa2',
+ 'type': 'identity',
+ 'name': 'keystone'
+ }
+
+ ],
+ 'links': {
+ 'self': 'http://localhost/identity/v3/catalog',
+ 'previous': None,
+ 'next': None
+ }
+ }
+
+ def setUp(self):
+ super(TestCatalogClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = catalog_client.CatalogClient(fake_auth, 'identity',
+ 'RegionOne')
+
+ def test_show_catalog_with_bytes_body(self):
+ self._test_show_catalog(bytes_body=True)
+
+ def test_show_catalog_with_str_body(self):
+ self._test_show_catalog()
+
+ def _test_show_catalog(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_catalog,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_CATALOG_INFO,
+ bytes_body)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoint_groups_client.py b/tempest/tests/lib/services/identity/v3/test_endpoint_groups_client.py
index 8b034e6..c724f0a 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoint_groups_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoint_groups_client.py
@@ -1,162 +1,162 @@
-# Copyright 2017 AT&T Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.identity.v3 import endpoint_groups_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestEndPointGroupsClient(base.BaseServiceTest):
- FAKE_CREATE_ENDPOINT_GROUP = {
- "endpoint_group": {
- "id": 1,
- "name": "FAKE_ENDPOINT_GROUP",
- "description": "FAKE SERVICE ENDPOINT GROUP",
- "filters": {
- "service_id": 1
- }
- }
- }
-
- FAKE_ENDPOINT_GROUP_INFO = {
- "endpoint_group": {
- "id": 1,
- "name": "FAKE_ENDPOINT_GROUP",
- "description": "FAKE SERVICE ENDPOINT GROUP",
- "links": {
- "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
- "endpoint_groups/1"
- },
- "filters": {
- "service_id": 1
- }
- }
- }
-
- FAKE_LIST_ENDPOINT_GROUPS = {
- "endpoint_groups": [
- {
- "id": 1,
- "name": "SERVICE_GROUP1",
- "description": "FAKE SERVICE ENDPOINT GROUP",
- "links": {
- "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
- "endpoint_groups/1"
- },
- "filters": {
- "service_id": 1
- }
- },
- {
- "id": 2,
- "name": "SERVICE_GROUP2",
- "description": "FAKE SERVICE ENDPOINT GROUP",
- "links": {
- "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
- "endpoint_groups/2"
- },
- "filters": {
- "service_id": 2
- }
- }
- ]
- }
-
- def setUp(self):
- super(TestEndPointGroupsClient, self).setUp()
- fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = endpoint_groups_client.EndPointGroupsClient(
- fake_auth, 'identity', 'regionOne')
-
- def _test_create_endpoint_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.create_endpoint_group,
- 'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_CREATE_ENDPOINT_GROUP,
- bytes_body,
- status=201,
- name="FAKE_ENDPOINT_GROUP",
- filters={'service_id': "1"})
-
- def _test_show_endpoint_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_endpoint_group,
- 'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_ENDPOINT_GROUP_INFO,
- bytes_body,
- endpoint_group_id="1")
-
- def _test_check_endpoint_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.check_endpoint_group,
- 'tempest.lib.common.rest_client.RestClient.head',
- {},
- bytes_body,
- status=200,
- endpoint_group_id="1")
-
- def _test_update_endpoint_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.update_endpoint_group,
- 'tempest.lib.common.rest_client.RestClient.patch',
- self.FAKE_ENDPOINT_GROUP_INFO,
- bytes_body,
- endpoint_group_id="1",
- name="NewName")
-
- def _test_list_endpoint_groups(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_endpoint_groups,
- 'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_LIST_ENDPOINT_GROUPS,
- bytes_body)
-
- def test_create_endpoint_group_with_str_body(self):
- self._test_create_endpoint_group()
-
- def test_create_endpoint_group_with_bytes_body(self):
- self._test_create_endpoint_group(bytes_body=True)
-
- def test_show_endpoint_group_with_str_body(self):
- self._test_show_endpoint_group()
-
- def test_show_endpoint_group_with_bytes_body(self):
- self._test_show_endpoint_group(bytes_body=True)
-
- def test_check_endpoint_group_with_str_body(self):
- self._test_check_endpoint_group()
-
- def test_check_endpoint_group_with_bytes_body(self):
- self._test_check_endpoint_group(bytes_body=True)
-
- def test_list_endpoint_groups_with_str_body(self):
- self._test_list_endpoint_groups()
-
- def test_list_endpoint_groups_with_bytes_body(self):
- self._test_list_endpoint_groups(bytes_body=True)
-
- def test_update_endpoint_group_with_str_body(self):
- self._test_update_endpoint_group()
-
- def test_update_endpoint_group_with_bytes_body(self):
- self._test_update_endpoint_group(bytes_body=True)
-
- def test_delete_endpoint_group(self):
- self.check_service_client_function(
- self.client.delete_endpoint_group,
- 'tempest.lib.common.rest_client.RestClient.delete',
- {},
- endpoint_group_id="1",
- status=204)
+# Copyright 2017 AT&T Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.identity.v3 import endpoint_groups_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestEndPointGroupsClient(base.BaseServiceTest):
+ FAKE_CREATE_ENDPOINT_GROUP = {
+ "endpoint_group": {
+ "id": 1,
+ "name": "FAKE_ENDPOINT_GROUP",
+ "description": "FAKE SERVICE ENDPOINT GROUP",
+ "filters": {
+ "service_id": 1
+ }
+ }
+ }
+
+ FAKE_ENDPOINT_GROUP_INFO = {
+ "endpoint_group": {
+ "id": 1,
+ "name": "FAKE_ENDPOINT_GROUP",
+ "description": "FAKE SERVICE ENDPOINT GROUP",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
+ "endpoint_groups/1"
+ },
+ "filters": {
+ "service_id": 1
+ }
+ }
+ }
+
+ FAKE_LIST_ENDPOINT_GROUPS = {
+ "endpoint_groups": [
+ {
+ "id": 1,
+ "name": "SERVICE_GROUP1",
+ "description": "FAKE SERVICE ENDPOINT GROUP",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
+ "endpoint_groups/1"
+ },
+ "filters": {
+ "service_id": 1
+ }
+ },
+ {
+ "id": 2,
+ "name": "SERVICE_GROUP2",
+ "description": "FAKE SERVICE ENDPOINT GROUP",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-EP-FILTER/" +
+ "endpoint_groups/2"
+ },
+ "filters": {
+ "service_id": 2
+ }
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestEndPointGroupsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = endpoint_groups_client.EndPointGroupsClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_create_endpoint_group(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_endpoint_group,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_ENDPOINT_GROUP,
+ bytes_body,
+ status=201,
+ name="FAKE_ENDPOINT_GROUP",
+ filters={'service_id': "1"})
+
+ def _test_show_endpoint_group(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_endpoint_group,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ENDPOINT_GROUP_INFO,
+ bytes_body,
+ endpoint_group_id="1")
+
+ def _test_check_endpoint_group(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.check_endpoint_group,
+ 'tempest.lib.common.rest_client.RestClient.head',
+ {},
+ bytes_body,
+ status=200,
+ endpoint_group_id="1")
+
+ def _test_update_endpoint_group(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_endpoint_group,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_ENDPOINT_GROUP_INFO,
+ bytes_body,
+ endpoint_group_id="1",
+ name="NewName")
+
+ def _test_list_endpoint_groups(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_endpoint_groups,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_ENDPOINT_GROUPS,
+ bytes_body)
+
+ def test_create_endpoint_group_with_str_body(self):
+ self._test_create_endpoint_group()
+
+ def test_create_endpoint_group_with_bytes_body(self):
+ self._test_create_endpoint_group(bytes_body=True)
+
+ def test_show_endpoint_group_with_str_body(self):
+ self._test_show_endpoint_group()
+
+ def test_show_endpoint_group_with_bytes_body(self):
+ self._test_show_endpoint_group(bytes_body=True)
+
+ def test_check_endpoint_group_with_str_body(self):
+ self._test_check_endpoint_group()
+
+ def test_check_endpoint_group_with_bytes_body(self):
+ self._test_check_endpoint_group(bytes_body=True)
+
+ def test_list_endpoint_groups_with_str_body(self):
+ self._test_list_endpoint_groups()
+
+ def test_list_endpoint_groups_with_bytes_body(self):
+ self._test_list_endpoint_groups(bytes_body=True)
+
+ def test_update_endpoint_group_with_str_body(self):
+ self._test_update_endpoint_group()
+
+ def test_update_endpoint_group_with_bytes_body(self):
+ self._test_update_endpoint_group(bytes_body=True)
+
+ def test_delete_endpoint_group(self):
+ self.check_service_client_function(
+ self.client.delete_endpoint_group,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ endpoint_group_id="1",
+ status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_identity_client.py b/tempest/tests/lib/services/identity/v3/test_identity_client.py
index e435fe2..3739fe6 100644
--- a/tempest/tests/lib/services/identity/v3/test_identity_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_identity_client.py
@@ -60,6 +60,34 @@
}
}
+ FAKE_AUTH_DOMAINS = {
+ "domains": [
+ {
+ "description": "my domain description",
+ "enabled": True,
+ "id": "1789d1",
+ "links": {
+ "self": "https://example.com/identity/v3/domains/1789d1"
+ },
+ "name": "my domain"
+ },
+ {
+ "description": "description of my other domain",
+ "enabled": True,
+ "id": "43e8da",
+ "links": {
+ "self": "https://example.com/identity/v3/domains/43e8da"
+ },
+ "name": "another domain"
+ }
+ ],
+ "links": {
+ "self": "https://example.com/identity/v3/auth/domains",
+ "previous": None,
+ "next": None
+ }
+ }
+
def setUp(self):
super(TestIdentityClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -89,6 +117,13 @@
self.FAKE_AUTH_PROJECTS,
bytes_body)
+ def _test_list_auth_domains(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_auth_domains,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_AUTH_DOMAINS,
+ bytes_body)
+
def test_show_api_description_with_str_body(self):
self._test_show_api_description()
@@ -109,8 +144,22 @@
resp_token="cbc36478b0bd8e67e89",
status=204)
+ def test_check_token_existence(self):
+ self.check_service_client_function(
+ self.client.check_token_existence,
+ 'tempest.lib.common.rest_client.RestClient.head',
+ {},
+ resp_token="cbc36478b0bd8e67e89",
+ status=200)
+
def test_list_auth_projects_with_str_body(self):
self._test_list_auth_projects()
def test_list_auth_projects_with_bytes_body(self):
self._test_list_auth_projects(bytes_body=True)
+
+ def test_list_auth_domains_with_str_body(self):
+ self._test_list_auth_domains()
+
+ def test_list_auth_domains_with_bytes_body(self):
+ self._test_list_auth_domains(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_oauth_token_client.py b/tempest/tests/lib/services/identity/v3/test_oauth_token_client.py
index b9b9b15..420ea5f 100644
--- a/tempest/tests/lib/services/identity/v3/test_oauth_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_oauth_token_client.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslotest import mockpatch
+import fixtures
from tempest.lib.services.identity.v3 import oauth_token_client
from tempest.tests.lib import fake_auth_provider
@@ -137,7 +137,7 @@
def test_create_request_token(self):
mock_resp = self._mock_token_response(self.FAKE_CREATE_REQUEST_TOKEN)
resp = fake_http.fake_http_response(None, status=201), mock_resp
- self.useFixture(mockpatch.Patch(
+ self.useFixture(fixtures.MockPatch(
'tempest.lib.common.rest_client.RestClient.post',
return_value=resp))
@@ -157,7 +157,7 @@
mock_resp = self._mock_token_response(self.FAKE_CREATE_ACCESS_TOKEN)
req_secret = self.FAKE_CREATE_REQUEST_TOKEN['oauth_token_secret']
resp = fake_http.fake_http_response(None, status=201), mock_resp
- self.useFixture(mockpatch.Patch(
+ self.useFixture(fixtures.MockPatch(
'tempest.lib.common.rest_client.RestClient.post',
return_value=resp))
diff --git a/tempest/tests/lib/services/image/v2/test_images_client.py b/tempest/tests/lib/services/image/v2/test_images_client.py
index 9648985..ee4d4cb 100644
--- a/tempest/tests/lib/services/image/v2/test_images_client.py
+++ b/tempest/tests/lib/services/image/v2/test_images_client.py
@@ -12,6 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
+from tempest.lib.common.utils import data_utils
from tempest.lib.services.image.v2 import images_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -42,6 +45,57 @@
"container_format": None
}
+ FAKE_LIST_IMAGES = {
+ "images": [
+ {
+ "status": "active",
+ "name": "cirros-0.3.2-x86_64-disk",
+ "tags": [],
+ "container_format": "bare",
+ "created_at": "2014-11-07T17:07:06Z",
+ "disk_format": "qcow2",
+ "updated_at": "2014-11-07T17:19:09Z",
+ "visibility": "public",
+ "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27",
+ "min_disk": 0,
+ "protected": False,
+ "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27",
+ "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file",
+ "checksum": "64d7c1cd2b6f60c92c14662941cb7913",
+ "owner": "5ef70662f8b34079a6eddb8da9d75fe8",
+ "size": 13167616,
+ "min_ram": 0,
+ "schema": "/v2/schemas/image",
+ "virtual_size": None
+ },
+ {
+ "status": "active",
+ "name": "F17-x86_64-cfntools",
+ "tags": [],
+ "container_format": "bare",
+ "created_at": "2014-10-30T08:23:39Z",
+ "disk_format": "qcow2",
+ "updated_at": "2014-11-03T16:40:10Z",
+ "visibility": "public",
+ "self": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c",
+ "min_disk": 0,
+ "protected": False,
+ "id": "781b3762-9469-4cec-b58d-3349e5de4e9c",
+ "file": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c/file",
+ "checksum": "afab0f79bac770d61d24b4d0560b5f70",
+ "owner": "5ef70662f8b34079a6eddb8da9d75fe8",
+ "size": 476704768,
+ "min_ram": 0,
+ "schema": "/v2/schemas/image",
+ "virtual_size": None
+ }
+ ],
+ "schema": "/v2/schemas/images",
+ "first": "/v2/images"
+ }
+
+ FAKE_TAG_NAME = "fake tag"
+
def setUp(self):
super(TestImagesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -74,6 +128,14 @@
bytes_body,
image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8")
+ def _test_list_images(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_images,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_IMAGES,
+ bytes_body,
+ mock_args=['images'])
+
def test_create_image_with_str_body(self):
self._test_create_image()
@@ -104,8 +166,56 @@
'tempest.lib.common.rest_client.RestClient.delete',
{}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
+ def test_store_image_file(self):
+ data = six.BytesIO(data_utils.random_bytes())
+
+ self.check_service_client_function(
+ self.client.store_image_file,
+ 'tempest.lib.common.rest_client.RestClient.raw_request',
+ {},
+ image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
+ status=204,
+ data=data)
+
+ def test_show_image_file(self):
+        # NOTE: This API returns raw binary data in the response body, but an
+        # error would be thrown if random bytes were used as the resp body,
+        # since ``create_response`` calls ``json.dumps`` on it.
+ self.check_service_client_function(
+ self.client.show_image_file,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ {},
+ resp_as_string=True,
+ image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
+ headers={'Content-Type': 'application/octet-stream'},
+ status=200)
+
+ def test_add_image_tag(self):
+ self.check_service_client_function(
+ self.client.add_image_tag,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
+ status=204,
+ tag=self.FAKE_TAG_NAME)
+
+ def test_delete_image_tag(self):
+ self.check_service_client_function(
+ self.client.delete_image_tag,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
+ status=204,
+ tag=self.FAKE_TAG_NAME)
+
def test_show_image_with_str_body(self):
self._test_show_image()
def test_show_image_with_bytes_body(self):
self._test_show_image(bytes_body=True)
+
+ def test_list_images_with_str_body(self):
+ self._test_list_images()
+
+ def test_list_images_with_bytes_body(self):
+ self._test_list_images(bytes_body=True)
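
A brief usage sketch of the upload/download pair exercised above, assuming
`client` is an ImagesClient like self.client; the image id and payload are
illustrative, and show_image_file returns a ResponseBodyData whose .data
attribute carries the raw bytes:

    import six

    payload = six.BytesIO(b'raw image bytes')             # illustrative payload
    client.store_image_file(image_id='fake-image-id', data=payload)
    body = client.show_image_file(image_id='fake-image-id')
    raw_bytes = body.data                                  # raw octet-stream content
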
diff --git a/tempest/tests/services/object_storage/__init__.py b/tempest/tests/lib/services/object_storage/__init__.py
similarity index 100%
rename from tempest/tests/services/object_storage/__init__.py
rename to tempest/tests/lib/services/object_storage/__init__.py
diff --git a/tempest/tests/lib/services/object_storage/test_bulk_middleware_client.py b/tempest/tests/lib/services/object_storage/test_bulk_middleware_client.py
new file mode 100644
index 0000000..08028c3
--- /dev/null
+++ b/tempest/tests/lib/services/object_storage/test_bulk_middleware_client.py
@@ -0,0 +1,66 @@
+# Copyright 2017 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.object_storage import bulk_middleware_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestBulkMiddlewareClient(base.BaseServiceTest):
+
+ def setUp(self):
+ super(TestBulkMiddlewareClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = bulk_middleware_client.BulkMiddlewareClient(
+ fake_auth, 'object-storage', 'regionOne')
+
+ def test_upload_archive(self):
+ url = 'test_path?extract-archive=tar'
+ data = 'test_data'
+ self.check_service_client_function(
+ self.client.upload_archive,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ mock_args=[url, data, {}],
+ resp_as_string=True,
+ upload_path='test_path', data=data, archive_file_format='tar')
+
+ def test_delete_bulk_data(self):
+ url = '?bulk-delete'
+ data = 'test_data'
+ self.check_service_client_function(
+ self.client.delete_bulk_data,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ mock_args=[url, {}, data],
+ resp_as_string=True,
+ data=data)
+
+ def _test_delete_bulk_data_with_post(self, status):
+ url = '?bulk-delete'
+ data = 'test_data'
+ self.check_service_client_function(
+ self.client.delete_bulk_data_with_post,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {},
+ mock_args=[url, data, {}],
+ resp_as_string=True,
+ status=status,
+ data=data)
+
+ def test_delete_bulk_data_with_post_200(self):
+ self._test_delete_bulk_data_with_post(200)
+
+ def test_delete_bulk_data_with_post_204(self):
+ self._test_delete_bulk_data_with_post(204)
diff --git a/tempest/tests/lib/services/object_storage/test_capabilities_client.py b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
new file mode 100644
index 0000000..b7f972a
--- /dev/null
+++ b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
@@ -0,0 +1,54 @@
+# Copyright 2016 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.lib.services.object_storage import capabilities_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestCapabilitiesClient(base.BaseServiceTest):
+
+ def setUp(self):
+ super(TestCapabilitiesClient, self).setUp()
+ self.fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.url = self.fake_auth.base_url(None)
+ self.client = capabilities_client.CapabilitiesClient(
+ self.fake_auth, 'swift', 'region1')
+
+ def _test_list_capabilities(self, bytes_body=False):
+ resp = {
+ "swift": {
+ "version": "1.11.0"
+ },
+ "slo": {
+ "max_manifest_segments": 1000,
+ "max_manifest_size": 2097152,
+ "min_segment_size": 1
+ },
+ "staticweb": {},
+ "tempurl": {}
+ }
+ self.check_service_client_function(
+ self.client.list_capabilities,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ resp,
+ bytes_body)
+
+ def test_list_capabilities_with_str_body(self):
+ self._test_list_capabilities()
+
+ def test_list_capabilities_with_bytes_body(self):
+ self._test_list_capabilities(True)
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
new file mode 100644
index 0000000..a16d1d7
--- /dev/null
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -0,0 +1,108 @@
+# Copyright 2016 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from tempest.lib import exceptions
+from tempest.lib.services.object_storage import object_client
+from tempest.tests import base
+from tempest.tests.lib import fake_auth_provider
+
+
+class TestObjectClient(base.TestCase):
+
+ def setUp(self):
+ super(TestObjectClient, self).setUp()
+ self.fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.url = self.fake_auth.base_url(None)
+ self.object_client = object_client.ObjectClient(self.fake_auth,
+ 'swift', 'region1')
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_object_continue_no_data(self, mock_poc):
+ self._validate_create_object_continue(None, mock_poc)
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_object_continue_with_data(self, mock_poc):
+ self._validate_create_object_continue('hello', mock_poc)
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_continue_with_no_continue_received(self, mock_poc):
+ self._validate_create_object_continue('hello', mock_poc,
+ initial_status=201)
+
+ def _validate_create_object_continue(self, req_data,
+ mock_poc, initial_status=100):
+
+ expected_hdrs = {
+ 'X-Auth-Token': self.fake_auth.get_token(),
+ 'content-length': 0 if req_data is None else len(req_data),
+ 'Expect': '100-continue'}
+
+        # Set up the mocks prior to invoking the object creation
+ mock_resp_cls = mock.Mock()
+ mock_resp_cls._read_status.return_value = ("1", initial_status, "OK")
+
+ mock_poc.return_value.response_class.return_value = mock_resp_cls
+
+ # This is the final expected return value
+ mock_poc.return_value.getresponse.return_value.status = 201
+ mock_poc.return_value.getresponse.return_value.reason = 'OK'
+
+ # Call method to PUT object using expect:100-continue
+ cnt = "container1"
+ obj = "object1"
+ path = "/%s/%s" % (cnt, obj)
+
+ # If the expected initial status is not 100, then an exception
+ # should be thrown and the connection closed
+        if initial_status == 100:
+ status, reason = \
+ self.object_client.create_object_continue(cnt, obj, req_data)
+ else:
+ self.assertRaises(exceptions.UnexpectedResponseCode,
+ self.object_client.create_object_continue, cnt,
+ obj, req_data)
+ mock_poc.return_value.close.assert_called_once_with()
+
+ # Verify that putrequest is called 1 time with the appropriate values
+ mock_poc.return_value.putrequest.assert_called_once_with('PUT', path)
+
+ # Verify that headers were written, including "Expect:100-continue"
+ calls = []
+
+ for header, value in expected_hdrs.items():
+ calls.append(mock.call(header, value))
+
+ mock_poc.return_value.putheader.assert_has_calls(calls, False)
+ mock_poc.return_value.endheaders.assert_called_once_with()
+
+ # The following steps are only taken if the initial status is 100
+        if initial_status == 100:
+ # Verify that the method returned what it was supposed to
+ self.assertEqual(status, 201)
+
+ # Verify that _safe_read was called once to remove the CRLF
+ # after the 100 response
+ mock_rc = mock_poc.return_value.response_class.return_value
+ mock_rc._safe_read.assert_called_once_with(2)
+
+ # Verify the actual data was written via send
+ mock_poc.return_value.send.assert_called_once_with(req_data)
+
+            # Verify that the getresponse method was called to receive
+            # the final response
+ mock_poc.return_value.getresponse.assert_called_once_with()
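
A short usage sketch of the method under test: it issues a PUT with an
Expect: 100-continue handshake and returns the final status and reason. The
container, object and payload names are illustrative and `object_client` is
assumed to be built as in setUp above:

    status, reason = object_client.create_object_continue(
        'container1', 'object1', 'hello')
    # The server first answers the handshake with 100, then the upload runs
    # and the final response is expected to be 201 Created.
    assert status == 201
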
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
new file mode 100644
index 0000000..1da2112
--- /dev/null
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -0,0 +1,65 @@
+# Copyright 2017 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from tempest.lib.services import clients
+
+
+class RegistryFixture(fixtures.Fixture):
+ """A fixture to setup a test client registry
+
+    The clients registry is a singleton. In Tempest it is filled with
+    content from the configuration. When testing Tempest lib classes
+    without configuration, it is handy to have the registry set up so
+    that service client factories can be accessed.
+
+ This fixture sets up the registry using a fake plugin, which includes all
+ services specified at __init__ time. Any other plugin in the registry
+ is removed at setUp time. The fake plugin is removed from the registry
+ on cleanup.
+ """
+
+ PLUGIN_NAME = 'fake_plugin_for_test'
+
+ def __init__(self):
+ """Initialise the registry fixture"""
+ self.services = set(['compute', 'identity.v2', 'identity.v3',
+ 'image.v1', 'image.v2', 'network', 'volume.v1',
+ 'volume.v2', 'volume.v3', 'object-storage'])
+
+ def _setUp(self):
+ # Cleanup the registry
+ registry = clients.ClientsRegistry()
+ registry._service_clients = {}
+ # Prepare the clients for registration
+ all_clients = []
+ service_clients = clients.tempest_modules()
+ for sc in self.services:
+ sc_module = service_clients[sc]
+ sc_unversioned = sc.split('.')[0]
+ sc_name = sc.replace('.', '_').replace('-', '_')
+ # Pass the bare minimum params to satisfy the clients interface
+ service_client_data = dict(
+ name=sc_name, service_version=sc, service=sc_unversioned,
+ module_path=sc_module.__name__,
+ client_names=sc_module.__all__)
+ all_clients.append(service_client_data)
+ registry.register_service_client(self.PLUGIN_NAME, all_clients)
+
+ def _cleanup():
+ del registry._service_clients[self.PLUGIN_NAME]
+
+ self.addCleanup(_cleanup)
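
Unit tests that need service client factories without any Tempest
configuration can install the fixture in setUp, as the validation resources
tests earlier in this change do. A minimal sketch:

    from tempest.tests import base
    from tempest.tests.lib.services import registry_fixture

    class MyServiceClientTest(base.TestCase):
        def setUp(self):
            super(MyServiceClientTest, self).setUp()
            # Reset the singleton registry so that only the fake plugin with
            # its default set of services is registered.
            self.useFixture(registry_fixture.RegistryFixture())
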
diff --git a/tempest/tests/lib/services/test_clients.py b/tempest/tests/lib/services/test_clients.py
index a837199..43fd88f 100644
--- a/tempest/tests/lib/services/test_clients.py
+++ b/tempest/tests/lib/services/test_clients.py
@@ -16,6 +16,7 @@
import fixtures
import mock
+import six
import testtools
from tempest.lib import auth
@@ -188,9 +189,7 @@
def setUp(self):
super(TestServiceClients, self).setUp()
self.useFixture(fixtures.MockPatch(
- 'tempest.lib.services.clients.tempest_modules', return_value={}))
- self.useFixture(fixtures.MockPatch(
- 'tempest.lib.services.clients._tempest_internal_modules',
+ 'tempest.lib.services.clients.tempest_modules',
return_value=set(['fake_service1'])))
def test___init___creds_v2_uri(self):
@@ -258,6 +257,58 @@
clients.ServiceClients(creds, identity_uri=uri,
client_parameters=params)
+ def test___init___plugin_service_clients_cannot_load(self):
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ uri = 'fake_uri'
+ fake_service_clients = {
+ 'service1': [{'name': 'client1',
+ 'service_version': 'client1.v1',
+ 'module_path': 'I cannot load this',
+ 'client_names': ['SomeClient1']}],
+ 'service2': [{'name': 'client2',
+ 'service_version': 'client2.v1',
+ 'module_path': 'This neither',
+ 'client_names': ['SomeClient1']}]}
+ msg = "(?=.*{0})(?=.*{1})".format(
+ *[x[1][0]['module_path'] for x in six.iteritems(
+ fake_service_clients)])
+ self.useFixture(fixtures.MockPatchObject(
+ clients.ClientsRegistry(), 'get_service_clients',
+ return_value=fake_service_clients))
+ with testtools.ExpectedException(
+ testtools.MultipleExceptions, value_re=msg):
+ clients.ServiceClients(creds, identity_uri=uri)
+
+ def test___init___plugin_service_clients_name_conflict(self):
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ uri = 'fake_uri'
+ fake_service_clients = {
+ 'serviceA': [{'name': 'client1',
+ 'service_version': 'client1.v1',
+ 'module_path': 'fake_path_1',
+ 'client_names': ['SomeClient1']}],
+ 'serviceB': [{'name': 'client1',
+ 'service_version': 'client1.v2',
+ 'module_path': 'fake_path_2',
+ 'client_names': ['SomeClient2']}],
+ 'serviceC': [{'name': 'client1',
+ 'service_version': 'client1.v1',
+ 'module_path': 'fake_path_2',
+ 'client_names': ['SomeClient1']}],
+ 'serviceD': [{'name': 'client1',
+ 'service_version': 'client1.v2',
+ 'module_path': 'fake_path_2',
+ 'client_names': ['SomeClient2']}]}
+ msg = "(?=.*{0})(?=.*{1})".format(
+ *[x[1][0]['service_version'] for x in six.iteritems(
+ fake_service_clients)])
+ self.useFixture(fixtures.MockPatchObject(
+ clients.ClientsRegistry(), 'get_service_clients',
+ return_value=fake_service_clients))
+ with testtools.ExpectedException(
+ testtools.MultipleExceptions, value_re=msg):
+ clients.ServiceClients(creds, identity_uri=uri)
+
def _get_manager(self, init_region='fake_region'):
# Get a manager to invoke _setup_parameters on
creds = fake_credentials.FakeKeystoneV2Credentials()
@@ -363,6 +414,7 @@
_manager = self._get_manager()
duplicate_service = 'fake_service1'
expected_error = '.*' + duplicate_service
+ _manager._registered_services = [duplicate_service]
with testtools.ExpectedException(
exceptions.ServiceClientRegistrationException, expected_error):
_manager.register_service_client_module(
diff --git a/tempest/tests/lib/services/volume/v2/test_availability_zone_client.py b/tempest/tests/lib/services/volume/v2/test_availability_zone_client.py
new file mode 100644
index 0000000..770565c
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_availability_zone_client.py
@@ -0,0 +1,51 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v2 import availability_zone_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAvailabilityZoneClient(base.BaseServiceTest):
+
+ FAKE_AZ_LIST = {
+ "availabilityZoneInfo": [
+ {
+ "zoneState": {
+ "available": True
+ },
+ "zoneName": "nova"
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestAvailabilityZoneClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = availability_zone_client.AvailabilityZoneClient(
+ fake_auth, 'volume', 'regionOne')
+
+ def _test_list_availability_zones(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_availability_zones,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_AZ_LIST,
+ bytes_body)
+
+ def test_list_availability_zones_with_str_body(self):
+ self._test_list_availability_zones()
+
+ def test_list_availability_zones_with_bytes_body(self):
+ self._test_list_availability_zones(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v2/test_backups_client.py b/tempest/tests/lib/services/volume/v2/test_backups_client.py
new file mode 100644
index 0000000..14e5fb0
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_backups_client.py
@@ -0,0 +1,117 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v2 import backups_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestBackupsClient(base.BaseServiceTest):
+
+ FAKE_BACKUP_LIST = {
+ "backups": [
+ {
+ "id": "2ef47aee-8844-490c-804d-2a8efe561c65",
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "backup001"
+ }
+ ]
+ }
+
+ FAKE_BACKUP_LIST_WITH_DETAIL = {
+ "backups": [
+ {
+ "availability_zone": "az1",
+ "container": "volumebackups",
+ "created_at": "2013-04-02T10:35:27.000000",
+ "description": None,
+ "fail_reason": None,
+ "id": "2ef47aee-8844-490c-804d-2a8efe561c65",
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "backup001",
+ "object_count": 22,
+ "size": 1,
+ "status": "available",
+ "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
+ "is_incremental": True,
+ "has_dependent_backups": False
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestBackupsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = backups_client.BackupsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_backups(self, detail=False, mock_args='backups',
+ bytes_body=False, **params):
+ if detail:
+ resp_body = self.FAKE_BACKUP_LIST_WITH_DETAIL
+ else:
+ resp_body = self.FAKE_BACKUP_LIST
+ self.check_service_client_function(
+ self.client.list_backups,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=[mock_args],
+ detail=detail,
+ **params)
+
+ def test_list_backups_with_str_body(self):
+ self._test_list_backups()
+
+ def test_list_backups_with_bytes_body(self):
+ self._test_list_backups(bytes_body=True)
+
+ def test_list_backups_with_detail_with_str_body(self):
+ mock_args = "backups/detail"
+ self._test_list_backups(detail=True, mock_args=mock_args)
+
+ def test_list_backups_with_detail_with_bytes_body(self):
+ mock_args = "backups/detail"
+ self._test_list_backups(detail=True, mock_args=mock_args,
+ bytes_body=True)
+
+ def test_list_backups_with_params(self):
+ # Run the test separately for each param, to avoid an assertion
+ # error resulting from randomized parameter order.
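+ # For example, {'sort_key': 'name', 'limit': 10} may serialize to
+ # either 'sort_key=name&limit=10' or 'limit=10&sort_key=name', so
+ # asserting one fixed URL containing both params would be flaky.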
+ mock_args = 'backups?sort_key=name'
+ self._test_list_backups(mock_args=mock_args, sort_key='name')
+
+ mock_args = 'backups/detail?limit=10'
+ self._test_list_backups(detail=True, mock_args=mock_args,
+ bytes_body=True, limit=10)
diff --git a/tempest/tests/lib/services/volume/v2/test_extensions_client.py b/tempest/tests/lib/services/volume/v2/test_extensions_client.py
new file mode 100644
index 0000000..c0ee421
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_extensions_client.py
@@ -0,0 +1,70 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v2 import extensions_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestExtensionsClient(base.BaseServiceTest):
+
+ FAKE_EXTENSION_LIST = {
+ "extensions": [
+ {
+ "updated": "2012-03-12T00:00:00+00:00",
+ "name": "QuotaClasses",
+ "links": [],
+ "namespace": "fake-namespace-1",
+ "alias": "os-quota-class-sets",
+ "description": "Quota classes management support."
+ },
+ {
+ "updated": "2013-05-29T00:00:00+00:00",
+ "name": "VolumeTransfer",
+ "links": [],
+ "namespace": "fake-namespace-2",
+ "alias": "os-volume-transfer",
+ "description": "Volume transfer management support."
+ },
+ {
+ "updated": "2014-02-10T00:00:00+00:00",
+ "name": "VolumeManage",
+ "links": [],
+ "namespace": "fake-namespace-3",
+ "alias": "os-volume-manage",
+ "description": "Manage existing backend storage by Cinder."
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestExtensionsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = extensions_client.ExtensionsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_extensions(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_extensions,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_EXTENSION_LIST,
+ bytes_body)
+
+ def test_list_extensions_with_str_body(self):
+ self._test_list_extensions()
+
+ def test_list_extensions_with_bytes_body(self):
+ self._test_list_extensions(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v2/test_limits_client.py b/tempest/tests/lib/services/volume/v2/test_limits_client.py
new file mode 100644
index 0000000..202054c
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_limits_client.py
@@ -0,0 +1,59 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v2 import limits_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLimitsClient(base.BaseServiceTest):
+
+ FAKE_LIMIT_INFO = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "totalSnapshotsUsed": 0,
+ "maxTotalBackups": 10,
+ "maxTotalVolumeGigabytes": 1000,
+ "maxTotalSnapshots": 10,
+ "maxTotalBackupGigabytes": 1000,
+ "totalBackupGigabytesUsed": 0,
+ "maxTotalVolumes": 10,
+ "totalVolumesUsed": 0,
+ "totalBackupsUsed": 0,
+ "totalGigabytesUsed": 0
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestLimitsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = limits_client.LimitsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_show_limits(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_limits,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIMIT_INFO,
+ bytes_body)
+
+ def test_show_limits_with_str_body(self):
+ self._test_show_limits()
+
+ def test_show_limits_with_bytes_body(self):
+ self._test_show_limits(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v2/test_snapshot_manage_client.py b/tempest/tests/lib/services/volume/v2/test_snapshot_manage_client.py
new file mode 100644
index 0000000..3fe8970
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_snapshot_manage_client.py
@@ -0,0 +1,83 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.services.volume.v2 import snapshot_manage_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestSnapshotManageClient(base.BaseServiceTest):
+
+ SNAPSHOT_MANAGE_REQUEST = {
+ "snapshot": {
+ "description": "snapshot-manage-description",
+ "metadata": None,
+ "ref": {
+ "source-name": "_snapshot-22b71da0-94f9-4aca-ad45-7522b3fa96bb"
+ },
+ "name": "snapshot-managed",
+ "volume_id": "7c064b34-1e4b-40bd-93ca-4ac5a973661b"
+ }
+ }
+
+ SNAPSHOT_MANAGE_RESPONSE = {
+ "snapshot": {
+ "status": "creating",
+ "description": "snapshot-manage-description",
+ "updated_at": None,
+ "volume_id": "32bafcc8-7109-42cd-8342-70d8de2bedef",
+ "id": "8fd6eb9d-0a82-456d-b1ec-dea4ac7f1ee2",
+ "size": 1,
+ "name": "snapshot-managed",
+ "created_at": "2017-07-11T10:07:58.000000",
+ "metadata": {}
+ }
+ }
+
+ def setUp(self):
+ super(TestSnapshotManageClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = snapshot_manage_client.SnapshotManageClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_manage_snapshot(self, bytes_body=False):
+ payload = json.dumps(self.SNAPSHOT_MANAGE_REQUEST, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for the mock_args assertion.
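+ # For illustration (hypothetical values): without sort_keys,
+ # json.dumps({'a': 1, 'b': 2}) and json.dumps({'b': 2, 'a': 1}) may
+ # serialize the keys in different orders; with sort_keys=True both
+ # yield '{"a": 1, "b": 2}', keeping the string comparison stable.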
+ with mock.patch.object(snapshot_manage_client.json,
+ 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.manage_snapshot,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.SNAPSHOT_MANAGE_RESPONSE,
+ to_utf=bytes_body,
+ status=202,
+ mock_args=['os-snapshot-manage', payload],
+ **self.SNAPSHOT_MANAGE_REQUEST['snapshot'])
+
+ def test_manage_snapshot_with_str_body(self):
+ self._test_manage_snapshot()
+
+ def test_manage_snapshot_with_bytes_body(self):
+ self._test_manage_snapshot(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v2/test_snapshots_client.py b/tempest/tests/lib/services/volume/v2/test_snapshots_client.py
index 7d656f1..c9f57a0 100644
--- a/tempest/tests/lib/services/volume/v2/test_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v2/test_snapshots_client.py
@@ -72,6 +72,12 @@
]
}
+ FAKE_SNAPSHOT_METADATA_ITEM = {
+ "meta": {
+ "key1": "value1"
+ }
+ }
+
def setUp(self):
super(TestSnapshotsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -142,6 +148,15 @@
self.FAKE_INFO_SNAPSHOT,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
+ def _test_show_snapshot_metadata_item(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_snapshot_metadata_item,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
+ bytes_body,
+ snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ id="key1")
+
def test_create_snapshot_with_str_body(self):
self._test_create_snapshot()
@@ -184,6 +199,12 @@
def test_update_snapshot_metadata_with_bytes_body(self):
self._test_update_snapshot_metadata(bytes_body=True)
+ def test_show_snapshot_metadata_item_with_str_body(self):
+ self._test_show_snapshot_metadata_item()
+
+ def test_show_snapshot_metadata_item_with_bytes_body(self):
+ self._test_show_snapshot_metadata_item(bytes_body=True)
+
def test_force_delete_snapshot(self):
self.check_service_client_function(
self.client.force_delete_snapshot,
diff --git a/tempest/tests/lib/services/volume/v2/test_transfers_client.py b/tempest/tests/lib/services/volume/v2/test_transfers_client.py
index 0c59bf2..84f4992 100644
--- a/tempest/tests/lib/services/volume/v2/test_transfers_client.py
+++ b/tempest/tests/lib/services/volume/v2/test_transfers_client.py
@@ -13,6 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
+import mock
+from oslo_serialization import jsonutils as json
+
from tempest.lib.services.volume.v2 import transfers_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -20,11 +25,14 @@
class TestTransfersClient(base.BaseServiceTest):
- FAKE_LIST_VOLUME_TRANSFERS_WITH_DETAIL = {
- "transfers": [{
- "created_at": "2017-04-18T09:10:03.000000",
+ FAKE_VOLUME_TRANSFER_ID = "0e89cdd1-6249-421b-96d8-25fac0623d42"
+
+ FAKE_VOLUME_TRANSFER_INFO = {
+ "transfer": {
+ "id": FAKE_VOLUME_TRANSFER_ID,
+ "name": "fake-volume-transfer",
"volume_id": "47bf04ef-1ea5-4c5f-a375-430a086d6747",
- "id": "0e89cdd1-6249-421b-96d8-25fac0623d42",
+ "created_at": "2017-04-18T09:10:03.000000",
"links": [
{
"href": "fake-url-1",
@@ -34,9 +42,8 @@
"href": "fake-url-2",
"rel": "bookmark"
}
- ],
- "name": "fake-volume-transfer"
- }]
+ ]
+ }
}
def setUp(self):
@@ -46,16 +53,106 @@
'volume',
'regionOne')
- def _test_list_volume_transfers_with_detail(self, bytes_body=False):
+ def _test_create_volume_transfer(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_VOLUME_TRANSFER_INFO)
+ resp_body['transfer'].update({"auth_key": "fake-auth-key"})
+ kwargs = {"name": "fake-volume-transfer",
+ "volume_id": "47bf04ef-1ea5-4c5f-a375-430a086d6747"}
+ payload = json.dumps({"transfer": kwargs}, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for the mock_args assertion.
+ with mock.patch.object(transfers_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.create_volume_transfer,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ resp_body,
+ to_utf=bytes_body,
+ status=202,
+ mock_args=['os-volume-transfer', payload],
+ **kwargs)
+
+ def _test_accept_volume_transfer(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_VOLUME_TRANSFER_INFO)
+ resp_body['transfer'].pop('created_at')
+ kwargs = {"auth_key": "fake-auth-key"}
+ payload = json.dumps({"accept": kwargs}, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for the mock_args assertion.
+ with mock.patch.object(transfers_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.accept_volume_transfer,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ resp_body,
+ to_utf=bytes_body,
+ status=202,
+ mock_args=['os-volume-transfer/%s/accept' %
+ self.FAKE_VOLUME_TRANSFER_ID, payload],
+ transfer_id=self.FAKE_VOLUME_TRANSFER_ID,
+ **kwargs)
+
+ def _test_show_volume_transfer(self, bytes_body=False):
+ resp_body = self.FAKE_VOLUME_TRANSFER_INFO
+ self.check_service_client_function(
+ self.client.show_volume_transfer,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ resp_body,
+ to_utf=bytes_body,
+ transfer_id="0e89cdd1-6249-421b-96d8-25fac0623d42")
+
+ def _test_list_volume_transfers(self, detail=False, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_VOLUME_TRANSFER_INFO)
+ if not detail:
+ resp_body['transfer'].pop('created_at')
+ resp_body = {"transfers": [resp_body['transfer']]}
self.check_service_client_function(
self.client.list_volume_transfers,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_LIST_VOLUME_TRANSFERS_WITH_DETAIL,
- bytes_body,
- detail=True)
+ resp_body,
+ to_utf=bytes_body,
+ detail=detail)
+
+ def test_create_volume_transfer_with_str_body(self):
+ self._test_create_volume_transfer()
+
+ def test_create_volume_transfer_with_bytes_body(self):
+ self._test_create_volume_transfer(bytes_body=True)
+
+ def test_accept_volume_transfer_with_str_body(self):
+ self._test_accept_volume_transfer()
+
+ def test_accept_volume_transfer_with_bytes_body(self):
+ self._test_accept_volume_transfer(bytes_body=True)
+
+ def test_show_volume_transfer_with_str_body(self):
+ self._test_show_volume_transfer()
+
+ def test_show_volume_transfer_with_bytes_body(self):
+ self._test_show_volume_transfer(bytes_body=True)
+
+ def test_list_volume_transfers_with_str_body(self):
+ self._test_list_volume_transfers()
+
+ def test_list_volume_transfers_with_bytes_body(self):
+ self._test_list_volume_transfers(bytes_body=True)
def test_list_volume_transfers_with_detail_with_str_body(self):
- self._test_list_volume_transfers_with_detail()
+ self._test_list_volume_transfers(detail=True)
def test_list_volume_transfers_with_detail_with_bytes_body(self):
- self._test_list_volume_transfers_with_detail(bytes_body=True)
+ self._test_list_volume_transfers(detail=True, bytes_body=True)
+
+ def test_delete_volume_transfer(self):
+ self.check_service_client_function(
+ self.client.delete_volume_transfer,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=202,
+ transfer_id="0e89cdd1-6249-421b-96d8-25fac0623d42")
diff --git a/tempest/tests/lib/services/volume/v2/test_volume_manage_client.py b/tempest/tests/lib/services/volume/v2/test_volume_manage_client.py
new file mode 100644
index 0000000..ea4a9f9
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v2/test_volume_manage_client.py
@@ -0,0 +1,111 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.services.volume.v2 import volume_manage_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestVolumeManageClient(base.BaseServiceTest):
+
+ VOLUME_MANAGE_REQUEST = {
+ "volume": {
+ "host": "controller1@rbd#rbd",
+ "name": "volume-managed",
+ "availability_zone": "nova",
+ "bootable": False,
+ "metadata": None,
+ "ref": {
+ "source-name": "volume-2ce6ca46-e6c1-4fe5-8268-3a1c536fcbf3"
+ },
+ "volume_type": None,
+ "description": "volume-manage-description"
+ }
+ }
+
+ VOLUME_MANAGE_RESPONSE = {
+ "volume": {
+ "migration_status": None,
+ "attachments": [],
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ],
+ "availability_zone": "nova",
+ "os-vol-host-attr:host": "controller1@rbd#rbd",
+ "encrypted": False,
+ "updated_at": None,
+ "replication_status": None,
+ "snapshot_id": None,
+ "id": "c07cd4a4-b52b-4511-a176-fbaa2011a227",
+ "size": 0,
+ "user_id": "142d8663efce464c89811c63e45bd82e",
+ "os-vol-tenant-attr:tenant_id": "f21a9c86d7114bf99c711f4874d80474",
+ "os-vol-mig-status-attr:migstat": None,
+ "metadata": {},
+ "status": "creating",
+ "description": "volume-manage-description",
+ "multiattach": False,
+ "source_volid": None,
+ "consistencygroup_id": None,
+ "os-vol-mig-status-attr:name_id": None,
+ "name": "volume-managed",
+ "bootable": "false",
+ "created_at": "2017-07-11T09:14:01.000000",
+ "volume_type": None
+ }
+ }
+
+ def setUp(self):
+ super(TestVolumeManageClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = volume_manage_client.VolumeManageClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_manage_volume(self, bytes_body=False):
+ payload = json.dumps(self.VOLUME_MANAGE_REQUEST, sort_keys=True)
+ json_dumps = json.dumps
+
+ # NOTE: Use sort_keys for json.dumps so that the expected and actual
+ # payloads are guaranteed to be identical for the mock_args assertion.
+ with mock.patch.object(volume_manage_client.json,
+ 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.manage_volume,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.VOLUME_MANAGE_RESPONSE,
+ to_utf=bytes_body,
+ status=202,
+ mock_args=['os-volume-manage', payload],
+ **self.VOLUME_MANAGE_REQUEST['volume'])
+
+ def test_manage_volume_with_str_body(self):
+ self._test_manage_volume()
+
+ def test_manage_volume_with_bytes_body(self):
+ self._test_manage_volume(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v2/test_volumes_client.py b/tempest/tests/lib/services/volume/v2/test_volumes_client.py
index 498b963..d7b042e 100644
--- a/tempest/tests/lib/services/volume/v2/test_volumes_client.py
+++ b/tempest/tests/lib/services/volume/v2/test_volumes_client.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_serialization import jsonutils as json
+
from tempest.lib.services.volume.v2 import volumes_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -20,6 +22,25 @@
class TestVolumesClient(base.BaseServiceTest):
+ FAKE_VOLUME_METADATA_ITEM = {
+ "meta": {
+ "key1": "value1"
+ }
+ }
+
+ FAKE_VOLUME_IMAGE_METADATA = {
+ "metadata": {
+ "container_format": "bare",
+ "min_ram": "0",
+ "disk_format": "raw",
+ "image_name": "xly-ubuntu16-server",
+ "image_id": "3e087b0c-10c5-4255-b147-6e8e9dbad6fc",
+ "checksum": "008f5d22fe3cb825d714da79607a90f9",
+ "min_disk": "0",
+ "size": "8589934592"
+ }
+ }
+
def setUp(self):
super(TestVolumesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -27,6 +48,22 @@
'volume',
'regionOne')
+ def _test_retype_volume(self, bytes_body=False):
+ kwargs = {
+ "new_type": "dedup-tier-replication",
+ "migration_policy": "never"
+ }
+
+ self.check_service_client_function(
+ self.client.retype_volume,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {},
+ to_utf=bytes_body,
+ status=202,
+ volume_id="a3be971b-8de5-4bdf-bdb8-3d8eb0fb69f8",
+ **kwargs
+ )
+
def _test_force_detach_volume(self, bytes_body=False):
kwargs = {
'attachment_id': '6980e295-920f-412e-b189-05c50d605acd',
@@ -45,8 +82,46 @@
**kwargs
)
+ def _test_show_volume_metadata_item(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_volume_metadata_item,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_METADATA_ITEM,
+ to_utf=bytes_body,
+ volume_id="a3be971b-8de5-4bdf-bdb8-3d8eb0fb69f8",
+ id="key1")
+
+ def _test_show_volume_image_metadata(self, bytes_body=False):
+ fake_volume_id = "a3be971b-8de5-4bdf-bdb8-3d8eb0fb69f8"
+ self.check_service_client_function(
+ self.client.show_volume_image_metadata,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_VOLUME_IMAGE_METADATA,
+ to_utf=bytes_body,
+ mock_args=['volumes/%s/action' % fake_volume_id,
+ json.dumps({"os-show_image_metadata": {}})],
+ volume_id=fake_volume_id)
+
def test_force_detach_volume_with_str_body(self):
self._test_force_detach_volume()
def test_force_detach_volume_with_bytes_body(self):
self._test_force_detach_volume(bytes_body=True)
+
+ def test_show_volume_metadata_item_with_str_body(self):
+ self._test_show_volume_metadata_item()
+
+ def test_show_volume_metadata_item_with_bytes_body(self):
+ self._test_show_volume_metadata_item(bytes_body=True)
+
+ def test_show_volume_image_metadata_with_str_body(self):
+ self._test_show_volume_image_metadata()
+
+ def test_show_volume_image_metadata_with_bytes_body(self):
+ self._test_show_volume_image_metadata(bytes_body=True)
+
+ def test_retype_volume_with_str_body(self):
+ self._test_retype_volume()
+
+ def test_retype_volume_with_bytes_body(self):
+ self._test_retype_volume(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_backups_client.py b/tempest/tests/lib/services/volume/v3/test_backups_client.py
new file mode 100644
index 0000000..f1ce987
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_backups_client.py
@@ -0,0 +1,50 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import backups_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestBackupsClient(base.BaseServiceTest):
+
+ FAKE_BACKUP_UPDATE = {
+ "backup": {
+ "id": "4c65c15f-a5c5-464b-b92a-90e4c04636a7",
+ "name": "fake-backup-name",
+ "links": "fake-links"
+ }
+ }
+
+ def setUp(self):
+ super(TestBackupsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = backups_client.BackupsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_update_backup(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_backup,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_BACKUP_UPDATE,
+ bytes_body,
+ backup_id='4c65c15f-a5c5-464b-b92a-90e4c04636a7')
+
+ def test_update_backup_with_str_body(self):
+ self._test_update_backup()
+
+ def test_update_backup_with_bytes_body(self):
+ self._test_update_backup(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
new file mode 100644
index 0000000..c2784b2
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import group_snapshots_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestGroupSnapshotsClient(base.BaseServiceTest):
+ FAKE_CREATE_GROUP_SNAPSHOT = {
+ "group_snapshot": {
+ "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ "name": "group-snapshot-001",
+ "description": "Test group snapshot 1"
+ }
+ }
+
+ FAKE_INFO_GROUP_SNAPSHOT = {
+ "group_snapshot": {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ "name": "group-snapshot-001",
+ "description": "Test group snapshot 1",
+ "group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
+ "status": "available",
+ "created_at": "20127-06-20T03:50:07Z"
+ }
+ }
+
+ FAKE_LIST_GROUP_SNAPSHOTS = {
+ "group_snapshots": [
+ {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ "name": "group-snapshot-001",
+ "description": "Test group snapshot 1",
+ "group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
+ "status": "available",
+ "created_at": "2017-06-20T03:50:07Z",
+ },
+ {
+ "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
+ "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ "name": "group-snapshot-002",
+ "description": "Test group snapshot 2",
+ "group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
+ "status": "available",
+ "created_at": "2017-06-19T01:52:47Z",
+ },
+ {
+ "id": "c5c4769e-213c-40a6-a568-8e797bb691d4",
+ "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ "name": "group-snapshot-003",
+ "description": "Test group snapshot 3",
+ "group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
+ "status": "available",
+ "created_at": "2017-06-18T06:34:32Z",
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestGroupSnapshotsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = group_snapshots_client.GroupSnapshotsClient(
+ fake_auth, 'volume', 'regionOne')
+
+ def _test_create_group_snapshot(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_group_snapshot,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_GROUP_SNAPSHOT,
+ bytes_body,
+ group_id="49c8c114-0d68-4e89-b8bc-3f5a674d54be",
+ status=202)
+
+ def _test_show_group_snapshot(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_group_snapshot,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_INFO_GROUP_SNAPSHOT,
+ bytes_body,
+ group_snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+
+ def _test_list_group_snapshots(self, detail=False, bytes_body=False,
+ mock_args='group_snapshots', **params):
+ resp_body = []
+ if detail:
+ resp_body = self.FAKE_LIST_GROUP_SNAPSHOTS
+ else:
+ resp_body = {
+ 'group_snapshots': [{
+ 'id': group_snapshot['id'],
+ 'name': group_snapshot['name'],
+ 'group_type_id': group_snapshot['group_type_id']}
+ for group_snapshot in
+ self.FAKE_LIST_GROUP_SNAPSHOTS['group_snapshots']
+ ]
+ }
+ self.check_service_client_function(
+ self.client.list_group_snapshots,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=[mock_args],
+ detail=detail,
+ **params)
+
+ def test_create_group_snapshot_with_str_body(self):
+ self._test_create_group_snapshot()
+
+ def test_create_group_snapshot_with_bytes_body(self):
+ self._test_create_group_snapshot(bytes_body=True)
+
+ def test_show_group_snapshot_with_str_body(self):
+ self._test_show_group_snapshot()
+
+ def test_show_group_snapshot_with_bytes_body(self):
+ self._test_show_group_snapshot(bytes_body=True)
+
+ def test_list_group_snapshots_with_str_body(self):
+ self._test_list_group_snapshots()
+
+ def test_list_group_snapshots_with_bytes_body(self):
+ self._test_list_group_snapshots(bytes_body=True)
+
+ def test_list_group_snapshots_with_detail_with_str_body(self):
+ mock_args = "group_snapshots/detail"
+ self._test_list_group_snapshots(detail=True, mock_args=mock_args)
+
+ def test_list_group_snapshots_with_detail_with_bytes_body(self):
+ mock_args = "group_snapshots/detail"
+ self._test_list_group_snapshots(detail=True, bytes_body=True,
+ mock_args=mock_args)
+
+ def test_list_group_snapshots_with_params(self):
+ # Run the test separately for each param, to avoid an assertion
+ # error resulting from randomized parameter order.
+ mock_args = 'group_snapshots?sort_key=name'
+ self._test_list_group_snapshots(mock_args=mock_args, sort_key='name')
+
+ mock_args = 'group_snapshots/detail?limit=10'
+ self._test_list_group_snapshots(detail=True, bytes_body=True,
+ mock_args=mock_args, limit=10)
+
+ def test_delete_group_snapshot(self):
+ self.check_service_client_function(
+ self.client.delete_group_snapshot,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ group_snapshot_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
+ status=202)
+
+ def test_reset_group_snapshot_status(self):
+ self.check_service_client_function(
+ self.client.reset_group_snapshot_status,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {},
+ status=202,
+ group_snapshot_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
+ status_to_set='error')
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index 95498c7..0f456a2 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -27,6 +27,46 @@
}
}
+ FAKE_INFO_GROUP_TYPE = {
+ "group_type": {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "name": "group-type-001",
+ "description": "Test group type 1",
+ "is_public": True,
+ "created_at": "20127-06-20T03:50:07Z",
+ "group_specs": {},
+ }
+ }
+
+ FAKE_LIST_GROUP_TYPES = {
+ "group_types": [
+ {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "name": "group-type-001",
+ "description": "Test group type 1",
+ "is_public": True,
+ "created_at": "2017-06-20T03:50:07Z",
+ "group_specs": {},
+ },
+ {
+ "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
+ "name": "group-type-002",
+ "description": "Test group type 2",
+ "is_public": True,
+ "created_at": "2017-06-19T01:52:47Z",
+ "group_specs": {},
+ },
+ {
+ "id": "c5c4769e-213c-40a6-a568-8e797bb691d4",
+ "name": "group-type-003",
+ "description": "Test group type 3",
+ "is_public": True,
+ "created_at": "2017-06-18T06:34:32Z",
+ "group_specs": {},
+ }
+ ]
+ }
+
def setUp(self):
super(TestGroupTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -42,6 +82,21 @@
bytes_body,
status=202)
+ def _test_show_group_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_group_type,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_INFO_GROUP_TYPE,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+
+ def _test_list_group_types(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_group_types,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_GROUP_TYPES,
+ bytes_body)
+
def test_create_group_type_with_str_body(self):
self._test_create_group_type()
@@ -55,3 +110,15 @@
{},
group_type_id='0e58433f-d108-4bf3-a22c-34e6b71ef86b',
status=202)
+
+ def test_show_group_type_with_str_body(self):
+ self._test_show_group_type()
+
+ def test_show_group_type_with_bytes_body(self):
+ self._test_show_group_type(bytes_body=True)
+
+ def test_list_group_types_with_str_body(self):
+ self._test_list_group_types()
+
+ def test_list_group_types_with_bytes_body(self):
+ self._test_list_group_types(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_groups_client.py b/tempest/tests/lib/services/volume/v3/test_groups_client.py
index 00db5b4..918e958 100644
--- a/tempest/tests/lib/services/volume/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_groups_client.py
@@ -28,6 +28,33 @@
}
}
+ FAKE_CREATE_GROUP_FROM_GROUP_SNAPSHOT = {
+ "create-from-src": {
+ "name": "group-002",
+ "description": "Test group 2",
+ "group_snapshot_id": "79c9afdb-7e46-4d71-9249-1f022886963c",
+ }
+ }
+
+ FAKE_CREATE_GROUP_FROM_GROUP = {
+ "create-from-src": {
+ "name": "group-003",
+ "description": "Test group 3",
+ "source_group_id": "e92f9dc7-0b20-492d-8ab2-3ad8fdac270e",
+ }
+ }
+
+ FAKE_UPDATE_GROUP = {
+ "group": {
+ "name": "new-group",
+ "description": "New test group",
+ "add_volumes": "27d45037-ade3-4a87-b729-dba3293c06f3,"
+ "6e7cd916-d961-41cc-b3bd-0601ca0c701f",
+ "remove_volumes": "4d580519-6467-448e-95e9-5b25c94d83c7,"
+ "ea22464c-f095-4a87-a31f-c5d34e0c6fc9"
+ }
+ }
+
FAKE_INFO_GROUP = {
"group": {
"id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
@@ -134,3 +161,35 @@
{},
group_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
status=202)
+
+ def test_create_group_from_group_snapshot(self):
+ self.check_service_client_function(
+ self.client.create_group_from_source,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_GROUP_FROM_GROUP_SNAPSHOT,
+ status=202)
+
+ def test_create_group_from_group(self):
+ self.check_service_client_function(
+ self.client.create_group_from_source,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_GROUP_FROM_GROUP,
+ status=202)
+
+ def test_update_group(self):
+ self.check_service_client_function(
+ self.client.update_group,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ group_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
+ status=202,
+ **self.FAKE_UPDATE_GROUP['group'])
+
+ def test_reset_group_status(self):
+ self.check_service_client_function(
+ self.client.reset_group_status,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {},
+ status=202,
+ group_id='0e701ab8-1bec-4b9f-b026-a7ba4af13578',
+ status_to_set='error')
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index bbebcd3..ed0eea3 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -125,35 +125,6 @@
self.assertRaises(ValueError, self._test_helper, _id)
-class TestSkipUnlessAttrDecorator(base.TestCase):
- def _test_skip_unless_attr(self, attr, expected_to_skip=True):
- class TestFoo(test.BaseTestCase):
- expected_attr = not expected_to_skip
-
- @decorators.skip_unless_attr(attr)
- def test_foo(self):
- pass
-
- t = TestFoo('test_foo')
- if expected_to_skip:
- self.assertRaises(testtools.TestCase.skipException,
- t.test_foo)
- else:
- try:
- t.test_foo()
- except Exception:
- raise testtools.TestCase.failureException()
-
- def test_skip_attr_does_not_exist(self):
- self._test_skip_unless_attr('unexpected_attr')
-
- def test_skip_attr_false(self):
- self._test_skip_unless_attr('expected_attr')
-
- def test_no_skip_for_attr_exist_and_true(self):
- self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
-
-
class TestRelatedBugDecorator(base.TestCase):
def test_relatedbug_when_no_exception(self):
f = mock.Mock()
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index a16da1c..37fe646 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from io import StringIO
import socket
import mock
import six
+from six import StringIO
import testtools
from tempest.lib.common import ssh
diff --git a/tempest/tests/services/object_storage/test_object_client.py b/tempest/tests/services/object_storage/test_object_client.py
index 748614c..86535f9 100644
--- a/tempest/tests/services/object_storage/test_object_client.py
+++ b/tempest/tests/services/object_storage/test_object_client.py
@@ -31,15 +31,15 @@
self.object_client = object_client.ObjectClient(self.fake_auth,
'swift', 'region1')
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_object_continue_no_data(self, mock_poc):
self._validate_create_object_continue(None, mock_poc)
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_object_continue_with_data(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc)
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_continue_with_no_continue_received(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc,
initial_status=201)
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
index 01b8a72..011bc9b 100644
--- a/tempest/tests/test_base_test.py
+++ b/tempest/tests/test_base_test.py
@@ -13,11 +13,12 @@
# under the License.
import mock
+from oslo_config import cfg
from tempest import clients
from tempest.common import credentials_factory as credentials
-from tempest.common import fixed_network
from tempest import config
+from tempest.lib.common import fixed_network
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
@@ -27,9 +28,12 @@
def setUp(self):
super(TestBaseTestCase, self).setUp()
self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
self.fixed_network_name = 'fixed-net'
- config.CONF.compute.fixed_network_name = self.fixed_network_name
- config.CONF.service_available.neutron = True
+ cfg.CONF.set_default('fixed_network_name', self.fixed_network_name,
+ 'compute')
+ cfg.CONF.set_default('neutron', True, 'service_available')
@mock.patch.object(test.BaseTestCase, 'get_client_manager')
@mock.patch.object(test.BaseTestCase, '_get_credentials_provider')
@@ -56,7 +60,7 @@
def test_get_tenant_network_with_nova_net(self, mock_man, mock_iaa,
mock_giv, mock_gtn, mock_gcp,
mock_gcm):
- config.CONF.service_available.neutron = False
+ cfg.CONF.set_default('neutron', False, 'service_available')
mock_prov = mock.Mock()
mock_admin_man = mock.Mock()
mock_iaa.return_value = True
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 2fc84dc..6018441 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -16,7 +16,9 @@
from oslo_config import cfg
import testtools
+from tempest.common import utils
from tempest import config
+from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest import test
from tempest.tests import base
@@ -31,6 +33,10 @@
fake_config.FakePrivate)
+# NOTE: This test module is for tempest.test.idempotent_id.
+# After all projects switch to using decorators.idempotent_id,
+# we can remove tempest.test.idempotent_id as well as this
+# test module.
class TestIdempotentIdDecorator(BaseDecoratorsTest):
def _test_helper(self, _id, **decorator_args):
@@ -71,7 +77,7 @@
class TestServicesDecorator(BaseDecoratorsTest):
def _test_services_helper(self, *decorator_args):
class TestFoo(test.BaseTestCase):
- @test.services(*decorator_args)
+ @utils.services(*decorator_args)
def test_bar(self):
return 0
@@ -90,7 +96,7 @@
self._test_services_helper('compute', 'compute')
def test_services_decorator_with_invalid_service(self):
- self.assertRaises(test.InvalidServiceTag,
+ self.assertRaises(exceptions.InvalidServiceTag,
self._test_services_helper, 'compute',
'bad_service')
@@ -102,11 +108,11 @@
'volume')
def test_services_list(self):
- service_list = test.get_service_list()
+ service_list = utils.get_service_list()
for service in service_list:
try:
self._test_services_helper(service)
- except test.InvalidServiceTag:
+ except exceptions.InvalidServiceTag:
self.fail('%s is not listed in the valid service tag list'
% service)
except KeyError:
@@ -133,7 +139,7 @@
def _test_requires_ext_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
- @test.requires_ext(**decorator_args)
+ @utils.requires_ext(**decorator_args)
def test_bar(self):
return 0
@@ -170,96 +176,3 @@
self._test_requires_ext_helper,
extension='enabled_ext',
service='bad_service')
-
-
-class TestConfigDecorators(BaseDecoratorsTest):
- def setUp(self):
- super(TestConfigDecorators, self).setUp()
- cfg.CONF.set_default('nova', True, 'service_available')
- cfg.CONF.set_default('glance', False, 'service_available')
-
- def _assert_skip_message(self, func, skip_msg):
- try:
- func()
- self.fail()
- except testtools.TestCase.skipException as skip_exc:
- self.assertEqual(skip_exc.args[0], skip_msg)
-
- def _test_skip_unless_config(self, expected_to_skip=True, *decorator_args):
-
- class TestFoo(test.BaseTestCase):
- @config.skip_unless_config(*decorator_args)
- def test_bar(self):
- return 0
-
- t = TestFoo('test_bar')
- if expected_to_skip:
- self.assertRaises(testtools.TestCase.skipException, t.test_bar)
- if (len(decorator_args) >= 3):
- # decorator_args[2]: skip message specified
- self._assert_skip_message(t.test_bar, decorator_args[2])
- else:
- try:
- self.assertEqual(t.test_bar(), 0)
- except testtools.TestCase.skipException:
- # We caught a skipException but we didn't expect to skip
- # this test so raise a hard test failure instead.
- raise testtools.TestCase.failureException(
- "Not supposed to skip")
-
- def _test_skip_if_config(self, expected_to_skip=True,
- *decorator_args):
-
- class TestFoo(test.BaseTestCase):
- @config.skip_if_config(*decorator_args)
- def test_bar(self):
- return 0
-
- t = TestFoo('test_bar')
- if expected_to_skip:
- self.assertRaises(testtools.TestCase.skipException, t.test_bar)
- if (len(decorator_args) >= 3):
- # decorator_args[2]: skip message specified
- self._assert_skip_message(t.test_bar, decorator_args[2])
- else:
- try:
- self.assertEqual(t.test_bar(), 0)
- except testtools.TestCase.skipException:
- # We caught a skipException but we didn't expect to skip
- # this test so raise a hard test failure instead.
- raise testtools.TestCase.failureException(
- "Not supposed to skip")
-
- def test_skip_unless_no_group(self):
- self._test_skip_unless_config(True, 'fake_group', 'an_option')
-
- def test_skip_unless_no_option(self):
- self._test_skip_unless_config(True, 'service_available',
- 'not_an_option')
-
- def test_skip_unless_false_option(self):
- self._test_skip_unless_config(True, 'service_available', 'glance')
-
- def test_skip_unless_false_option_msg(self):
- self._test_skip_unless_config(True, 'service_available', 'glance',
- 'skip message')
-
- def test_skip_unless_true_option(self):
- self._test_skip_unless_config(False,
- 'service_available', 'nova')
-
- def test_skip_if_no_group(self):
- self._test_skip_if_config(False, 'fake_group', 'an_option')
-
- def test_skip_if_no_option(self):
- self._test_skip_if_config(False, 'service_available', 'not_an_option')
-
- def test_skip_if_false_option(self):
- self._test_skip_if_config(False, 'service_available', 'glance')
-
- def test_skip_if_true_option(self):
- self._test_skip_if_config(True, 'service_available', 'nova')
-
- def test_skip_if_true_option_msg(self):
- self._test_skip_if_config(True, 'service_available', 'nova',
- 'skip message')
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index f005c21..bc3a753 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -86,13 +86,13 @@
def test_scenario_tests_need_service_tags(self):
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake:', './tempest/scenario/test_fake.py',
- "@test.services('compute')"))
+ "@utils.services('compute')"))
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/api/compute/test_fake.py',
- "@test.services('image')"))
+ "@utils.services('image')"))
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
- "@test.services('compute')"))
+ "@utils.services('compute')"))
self.assertTrue(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/scenario/test_fake.py',
'\n'))
@@ -113,12 +113,13 @@
def test_service_tags_not_in_module_path(self):
self.assertTrue(checks.service_tags_not_in_module_path(
- "@test.services('compute')", './tempest/api/compute/fake_test.py'))
+ "@utils.services('compute')",
+ './tempest/api/compute/fake_test.py'))
self.assertFalse(checks.service_tags_not_in_module_path(
- "@test.services('compute')",
+ "@utils.services('compute')",
'./tempest/scenario/compute/fake_test.py'))
self.assertFalse(checks.service_tags_not_in_module_path(
- "@test.services('compute')", './tempest/api/image/fake_test.py'))
+ "@utils.services('compute')", './tempest/api/image/fake_test.py'))
def test_no_hyphen_at_end_of_rand_name(self):
self.assertIsNone(checks.no_hyphen_at_end_of_rand_name(
@@ -180,3 +181,15 @@
'from oslo_config import cfg', './tempest/lib/decorators.py')))
self.assertTrue(list(checks.dont_use_config_in_tempest_lib(
'import tempest.config', './tempest/lib/common/rest_client.py')))
+
+ def test_unsupported_exception_attribute_PY3(self):
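+ # The check flags '<name>.message' for the common exception variable
+ # names exercised below (e, ex, exc, exception); 'ee' is not one of
+ # them, hence the expected count of 0.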
+ self.assertEqual(len(list(checks.unsupported_exception_attribute_PY3(
+ "raise TestCase.failureException(e.message)"))), 1)
+ self.assertEqual(len(list(checks.unsupported_exception_attribute_PY3(
+ "raise TestCase.failureException(ex.message)"))), 1)
+ self.assertEqual(len(list(checks.unsupported_exception_attribute_PY3(
+ "raise TestCase.failureException(exc.message)"))), 1)
+ self.assertEqual(len(list(checks.unsupported_exception_attribute_PY3(
+ "raise TestCase.failureException(exception.message)"))), 1)
+ self.assertEqual(len(list(checks.unsupported_exception_attribute_PY3(
+ "raise TestCase.failureException(ee.message)"))), 0)
diff --git a/tempest/tests/test_imports.py b/tempest/tests/test_imports.py
new file mode 100644
index 0000000..6f1cfca
--- /dev/null
+++ b/tempest/tests/test_imports.py
@@ -0,0 +1,69 @@
+# Copyright 2017 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from tempest.tests import base
+
+
+class ConfCounter(object):
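+ # A stand-in for tempest.config.CONF that counts attribute accesses,
+ # so the tests below can assert that merely importing a command
+ # module does not read any configuration.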
+
+ def __init__(self, *args, **kwargs):
+ self.count = 0
+
+ def __getattr__(self, key):
+ self.count += 1
+ return mock.MagicMock()
+
+ def get_counts(self):
+ return self.count
+
+
+class TestImports(base.TestCase):
+ def setUp(self):
+ super(TestImports, self).setUp()
+ self.conf_mock = self.patch('tempest.config.CONF',
+ new_callable=ConfCounter)
+
+ def test_account_generator_command_import(self):
+ from tempest.cmd import account_generator # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_cleanup_command_import(self):
+ from tempest.cmd import cleanup # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_init_command_import(self):
+ from tempest.cmd import init # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_list_plugins_command_import(self):
+ from tempest.cmd import list_plugins # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_run_command_import(self):
+ from tempest.cmd import run # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_subunit_describe_command_import(self):
+ from tempest.cmd import subunit_describe_calls # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_verify_tempest_config_command_import(self):
+ from tempest.cmd import verify_tempest_config # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_workspace_command_import(self):
+ from tempest.cmd import workspace # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
diff --git a/tempest/tests/test_list_tests.py b/tempest/tests/test_list_tests.py
index a238879..4af7463 100644
--- a/tempest/tests/test_list_tests.py
+++ b/tempest/tests/test_list_tests.py
@@ -23,12 +23,10 @@
class TestTestList(base.TestCase):
- def test_testr_list_tests_no_errors(self):
- # Remove unit test discover path from env to test tempest tests
+ def test_stestr_list_no_errors(self):
test_env = os.environ.copy()
- test_env.pop('OS_TEST_PATH')
import_failures = []
- p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
+ p = subprocess.Popen(['stestr', 'list'], stdout=subprocess.PIPE,
env=test_env)
ids, err = p.communicate()
self.assertEqual(0, p.returncode,
diff --git a/tempest/tests/test_microversions.py b/tempest/tests/test_microversions.py
index 173accb..ee6db71 100644
--- a/tempest/tests/test_microversions.py
+++ b/tempest/tests/test_microversions.py
@@ -13,6 +13,7 @@
# under the License.
from oslo_config import cfg
+import six
import testtools
from tempest.api.compute import base as compute_base
@@ -74,7 +75,7 @@
self.assertRaises(testtools.TestCase.skipException,
test_class.skip_checks)
except testtools.TestCase.skipException as e:
- raise testtools.TestCase.failureException(e.message)
+ raise testtools.TestCase.failureException(six.text_type(e))
def test_config_version_none_none(self):
expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2]
diff --git a/tempest/tests/test_tempest_plugin.py b/tempest/tests/test_tempest_plugin.py
index 13e2499..ddadef5 100644
--- a/tempest/tests/test_tempest_plugin.py
+++ b/tempest/tests/test_tempest_plugin.py
@@ -17,9 +17,16 @@
from tempest.test_discover import plugins
from tempest.tests import base
from tempest.tests import fake_tempest_plugin as fake_plugin
+from tempest.tests.lib.services import registry_fixture
class TestPluginDiscovery(base.TestCase):
+
+ def setUp(self):
+ super(TestPluginDiscovery, self).setUp()
+ # Make sure we leave the registry clean
+ self.useFixture(registry_fixture.RegistryFixture())
+
def test_load_tests_with_one_plugin(self):
# we can't mock stevedore since it's a singleton and already executed
# during test discovery. So basically this test covers the plugin loop
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
new file mode 100644
index 0000000..fc50736
--- /dev/null
+++ b/tempest/tests/test_test.py
@@ -0,0 +1,626 @@
+# Copyright 2017 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+import mock
+from oslo_config import cfg
+import testtools
+
+from tempest import clients
+from tempest import config
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+
+if sys.version_info >= (2, 7):
+ import unittest
+else:
+ import unittest2 as unittest
+
+
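+# Minimal TestResult that records reported errors in a plain list so the
+# unit tests below can inspect what a failing class-level fixture produced.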
+class LoggingTestResult(testtools.TestResult):
+
+ def __init__(self, log, *args, **kwargs):
+ super(LoggingTestResult, self).__init__(*args, **kwargs)
+ self.log = log
+
+ def addError(self, test, err=None, details=None):
+ self.log.append((test, err, details))
+
+
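+# Exercise BaseTestCase.get_class_validation_resources and
+# get_test_validation_resources without provisioning any real resources.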
+class TestValidationResources(base.TestCase):
+
+ validation_resources_module = 'tempest.lib.common.validation_resources'
+
+ def setUp(self):
+ super(TestValidationResources, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+
+ class TestTestClass(test.BaseTestCase):
+ pass
+
+ self.test_test_class = TestTestClass
+
+ def test_validation_resources_no_validation(self):
+ cfg.CONF.set_default('run_validation', False, 'validation')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ vr = self.test_test_class.get_class_validation_resources(osclients)
+ self.assertIsNone(vr)
+
+ def test_validation_resources_exists(self):
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = 'expected_validation_resources'
+ self.test_test_class._validation_resources[osclients] = expected_vr
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(expected_vr, obtained_vr)
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ def test_validation_resources_new(self, mock_create_vr):
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('neutron', True, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = {'expected_validation_resources': None}
+ mock_create_vr.return_value = expected_vr
+ with mock.patch.object(
+ self.test_test_class,
+ 'addClassResourceCleanup') as mock_add_class_cleanup:
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(1, mock_add_class_cleanup.call_count)
+ self.assertEqual(mock.call(vr.clear_validation_resources,
+ osclients,
+ use_neutron=True,
+ **expected_vr),
+ mock_add_class_cleanup.call_args)
+ self.assertEqual(mock_create_vr.call_count, 1)
+ self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+ self.assertEqual(expected_vr, obtained_vr)
+ self.assertIn(osclients, self.test_test_class._validation_resources)
+ self.assertEqual(expected_vr,
+ self.test_test_class._validation_resources[osclients])
+
+ def test_validation_resources_invalid_config(self):
+ invalid_version = 999
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+ 'validation')
+ cfg.CONF.set_default('neutron', True, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ with testtools.ExpectedException(
+ lib_exc.InvalidConfiguration,
+ value_re='^.*\n.*' + str(invalid_version)):
+ self.test_test_class.get_class_validation_resources(osclients)
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ def test_validation_resources_invalid_config_nova_net(self,
+ mock_create_vr):
+ invalid_version = 999
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+ 'validation')
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = {'expected_validation_resources': None}
+ mock_create_vr.return_value = expected_vr
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(mock_create_vr.call_count, 1)
+ self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+ self.assertEqual(expected_vr, obtained_vr)
+ self.assertIn(osclients, self.test_test_class._validation_resources)
+ self.assertEqual(expected_vr,
+ self.test_test_class._validation_resources[osclients])
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ @mock.patch(validation_resources_module + '.clear_validation_resources',
+ autospec=True)
+ def test_validation_resources_fixture(self, mock_clean_vr, mock_create_vr):
+
+ class TestWithRun(self.test_test_class):
+
+ def runTest(self):
+ pass
+
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ test_case = TestWithRun()
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ test_case.get_test_validation_resources(osclients)
+ self.assertEqual(1, mock_create_vr.call_count)
+ self.assertEqual(0, mock_clean_vr.call_count)
+
+
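+# Verify that set_network_resources only honours the values set by the
+# leaf-most setup_credentials, and that it must run before super() is called.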
+class TestSetNetworkResources(base.TestCase):
+
+ def setUp(self):
+ super(TestSetNetworkResources, self).setUp()
+
+ class ParentTest(test.BaseTestCase):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(dhcp=True)
+ super(ParentTest, cls).setup_credentials()
+
+ def runTest(self):
+ pass
+
+ self.parent_class = ParentTest
+
+ def test_set_network_resources_child_only(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(router=True)
+ super(ChildTest, cls).setup_credentials()
+
+ child_test = ChildTest()
+ child_test.setUpClass()
+        # Assert that the parent's network resources are not set
+ self.assertFalse(child_test._network_resources['dhcp'])
+ # Assert that the child network resources are set
+ self.assertTrue(child_test._network_resources['router'])
+
+ def test_set_network_resources_right_order(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ super(ChildTest, cls).setup_credentials()
+ cls.set_network_resources(router=True)
+
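+        # Calling set_network_resources after super().setup_credentials() is
+        # too late to influence provisioning, so setUpClass must raise.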
+ child_test = ChildTest()
+ with testtools.ExpectedException(RuntimeError,
+ value_re='set_network_resources'):
+ child_test.setUpClass()
+
+ def test_set_network_resources_children(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(router=True)
+ super(ChildTest, cls).setup_credentials()
+
+ class GrandChildTest(ChildTest):
+ pass
+
+        # Invoke setUpClass on both and check that the setup_credentials
+        # call-check mechanism does not wrongly report a missing super() call.
+ child_test = ChildTest()
+ child_test.setUpClass()
+ grandchild_test = GrandChildTest()
+ grandchild_test.setUpClass()
+
+
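+# Cover the class-level resource cleanup machinery and the checks that
+# subclasses invoke super() in skip_checks, setup_credentials and
+# resource_cleanup.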
+class TestTempestBaseTestClass(base.TestCase):
+
+ def setUp(self):
+ super(TestTempestBaseTestClass, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+
+ class ParentTest(test.BaseTestCase):
+
+ def runTest(self):
+ pass
+
+ self.parent_test = ParentTest
+
+ def test_resource_cleanup(self):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ exp_args = (1, 2,)
+ exp_kwargs = {'a': 1, 'b': 2}
+ mock1 = mock.Mock()
+ mock2 = mock.Mock()
+ exp_functions = [mock1, mock2]
+
+ class TestWithCleanups(self.parent_test):
+
+ @classmethod
+ def resource_setup(cls):
+ for fn in exp_functions:
+ cls.addClassResourceCleanup(fn, *exp_args,
+ **exp_kwargs)
+
+ test_cleanups = TestWithCleanups()
+ suite = unittest.TestSuite((test_cleanups,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ # No exception raised - error log is empty
+ self.assertFalse(log)
+ # All stacked resource cleanups invoked
+ mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+ mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+ # Cleanup stack is empty
+ self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+ def test_resource_cleanup_failures(self):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ exp_args = (1, 2,)
+ exp_kwargs = {'a': 1, 'b': 2}
+ mock1 = mock.Mock()
+ mock1.side_effect = Exception('mock1 resource cleanup failure')
+ mock2 = mock.Mock()
+ mock3 = mock.Mock()
+ mock3.side_effect = Exception('mock3 resource cleanup failure')
+ exp_functions = [mock1, mock2, mock3]
+
+ class TestWithFailingCleanups(self.parent_test):
+
+ @classmethod
+ def resource_setup(cls):
+ for fn in exp_functions:
+ cls.addClassResourceCleanup(fn, *exp_args,
+ **exp_kwargs)
+
+ test_cleanups = TestWithFailingCleanups()
+ suite = unittest.TestSuite((test_cleanups,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # One MultipleExceptions error captured
+ self.assertEqual(1, len(log))
+        # log[0] is (test, err, details); err is an exc_info tuple of
+        # (type, exception, traceback) whose exception is a MultipleExceptions
+ found_exc = log[0][1][1]
+ self.assertTrue(isinstance(found_exc, testtools.MultipleExceptions))
+ self.assertEqual(2, len(found_exc.args))
+ # Each arg is exc_info - match messages and order
+ self.assertIn('mock3 resource', str(found_exc.args[0][1]))
+ self.assertIn('mock1 resource', str(found_exc.args[1][1]))
+ # All stacked resource cleanups invoked
+ mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+ mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+ # Cleanup stack is empty
+ self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+ def test_super_resource_cleanup_not_invoked(self):
+
+ class BadResourceCleanup(self.parent_test):
+
+ @classmethod
+ def resource_cleanup(cls):
+ pass
+
+ bad_class = BadResourceCleanup()
+ suite = unittest.TestSuite((bad_class,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # One error captured
+ self.assertEqual(1, len(log))
+        # log[0] is (test, err, details); err is an exc_info tuple of
+        # (type, exception, traceback) whose exception is a RuntimeError
+ found_exc = log[0][1][1]
+ self.assertTrue(isinstance(found_exc, RuntimeError))
+ self.assertIn(BadResourceCleanup.__name__, str(found_exc))
+
+ def test_super_skip_checks_not_invoked(self):
+
+ class BadSkipChecks(self.parent_test):
+
+ @classmethod
+ def skip_checks(cls):
+ pass
+
+ bad_class = BadSkipChecks()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + BadSkipChecks.__name__):
+ bad_class.setUpClass()
+
+ def test_super_setup_credentials_not_invoked(self):
+
+ class BadSetupCredentials(self.parent_test):
+
+ @classmethod
+ def skip_checks(cls):
+ pass
+
+ bad_class = BadSetupCredentials()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + BadSetupCredentials.__name__):
+ bad_class.setUpClass()
+
+ def test_grandparent_skip_checks_not_invoked(self):
+
+ class BadSkipChecks(self.parent_test):
+
+ @classmethod
+ def skip_checks(cls):
+ pass
+
+ class SonOfBadSkipChecks(BadSkipChecks):
+ pass
+
+ bad_class = SonOfBadSkipChecks()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + SonOfBadSkipChecks.__name__):
+ bad_class.setUpClass()
+
+ @mock.patch('tempest.common.credentials_factory.is_admin_available',
+ autospec=True, return_value=True)
+ def test_skip_checks_admin(self, mock_iaa):
+ identity_version = 'identity_version'
+
+ class NeedAdmin(self.parent_test):
+ credentials = ['admin']
+
+ @classmethod
+ def get_identity_version(cls):
+ return identity_version
+
+ NeedAdmin().skip_checks()
+ mock_iaa.assert_called_once_with('identity_version')
+
+ @mock.patch('tempest.common.credentials_factory.is_admin_available',
+ autospec=True, return_value=False)
+ def test_skip_checks_admin_not_available(self, mock_iaa):
+ identity_version = 'identity_version'
+
+ class NeedAdmin(self.parent_test):
+ credentials = ['admin']
+
+ @classmethod
+ def get_identity_version(cls):
+ return identity_version
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedAdmin().skip_checks()
+ mock_iaa.assert_called_once_with('identity_version')
+
+ def test_skip_checks_identity_v2_not_available(self):
+ cfg.CONF.set_default('api_v2', False, 'identity-feature-enabled')
+
+ class NeedV2(self.parent_test):
+ identity_version = 'v2'
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedV2().skip_checks()
+
+ def test_skip_checks_identity_v3_not_available(self):
+ cfg.CONF.set_default('api_v3', False, 'identity-feature-enabled')
+
+ class NeedV3(self.parent_test):
+ identity_version = 'v3'
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedV3().skip_checks()
+
+ def test_setup_credentials_all(self):
+ expected_creds = ['string', ['list', 'role1', 'role2']]
+
+ class AllCredentials(self.parent_test):
+ credentials = expected_creds
+
+ expected_clients = 'clients'
+ with mock.patch.object(
+ AllCredentials,
+ 'get_client_manager') as mock_get_client_manager:
+ mock_get_client_manager.return_value = expected_clients
+ all_creds = AllCredentials()
+ all_creds.setup_credentials()
+ self.assertTrue(hasattr(all_creds, 'os_string'))
+ self.assertEqual(expected_clients, all_creds.os_string)
+ self.assertTrue(hasattr(all_creds, 'os_roles_list'))
+ self.assertEqual(expected_clients, all_creds.os_roles_list)
+ self.assertEqual(2, mock_get_client_manager.call_count)
+ self.assertEqual(
+ expected_creds[0],
+ mock_get_client_manager.mock_calls[0][2]['credential_type'])
+ self.assertEqual(
+ expected_creds[1][1:],
+ mock_get_client_manager.mock_calls[1][2]['roles'])
+
+ def test_setup_class_overwritten(self):
+
+ class OverridesSetup(self.parent_test):
+
+ @classmethod
+ def setUpClass(cls): # noqa
+ pass
+
+ overrides_setup = OverridesSetup()
+ suite = unittest.TestSuite((overrides_setup,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # Record 0: the test (an error holder). The error is generated
+        # during the test run.
+ self.assertIn('runTest', str(log[0][0]))
+ # Record 0, traceback
+ self.assertRegex(
+ str(log[0][2]['traceback']).replace('\n', ' '),
+ RuntimeError.__name__ + ': .* ' + OverridesSetup.__name__)
+
+
+class TestTempestBaseTestClassFixtures(base.TestCase):
+
+ SETUP_FIXTURES = [test.BaseTestCase.setUpClass.__name__,
+ test.BaseTestCase.skip_checks.__name__,
+ test.BaseTestCase.setup_credentials.__name__,
+ test.BaseTestCase.setup_clients.__name__,
+ test.BaseTestCase.resource_setup.__name__]
+ TEARDOWN_FIXTURES = [test.BaseTestCase.tearDownClass.__name__,
+ test.BaseTestCase.resource_cleanup.__name__,
+ test.BaseTestCase.clear_credentials.__name__]
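+    # The two lists above mirror the order in which BaseTestCase runs its
+    # class-level fixtures during setUpClass and tearDownClass.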
+
+ def setUp(self):
+ super(TestTempestBaseTestClassFixtures, self).setUp()
+ self.mocks = {}
+ for fix in self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES:
+ self.mocks[fix] = mock.Mock()
+
+ def tracker_builder(name):
+
+ def tracker(cls):
+ # Track that the fixture was invoked
+ cls.fixtures_invoked.append(name)
+ # Run the fixture
+ getattr(super(TestWithClassFixtures, cls), name)()
+ # Run a mock we can use for side effects
+ self.mocks[name]()
+
+ return tracker
+
+ class TestWithClassFixtures(test.BaseTestCase):
+
+ credentials = []
+ fixtures_invoked = []
+
+ def runTest(_self):
+ pass
+
+ # Decorate all test class fixtures with tracker_builder
+ for method_name in self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES:
+ setattr(TestWithClassFixtures, method_name,
+ classmethod(tracker_builder(method_name)))
+
+ self.test = TestWithClassFixtures()
+
+ def test_no_error_flow(self):
+ # If all setup fixtures are executed, all cleanup fixtures are
+ # executed too
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+ self.test.fixtures_invoked)
+
+ def test_skip_only(self):
+        # If a skip condition is hit in the test, no credentials or resources
+        # are provisioned or cleaned up
+ self.mocks['skip_checks'].side_effect = (
+ testtools.testcase.TestSkipped())
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ # If we trigger a skip condition, teardown is not invoked at all
+ self.assertEqual(self.SETUP_FIXTURES[:2],
+ self.test.fixtures_invoked)
+
+ def test_skip_credentials_fails(self):
+ expected_exc = 'sc exploded'
+ self.mocks['setup_credentials'].side_effect = Exception(expected_exc)
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # If setup_credentials explodes, tearDownClass and clear_credentials
+        # are invoked and the exception is re-raised
+ self.assertEqual((self.SETUP_FIXTURES[:3] +
+ [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+ self.test.fixtures_invoked)
+ found_exc = log[0][1][1]
+ self.assertIn(expected_exc, str(found_exc))
+
+ def test_skip_credentials_fails_clear_fails(self):
+        # If cleanup fails after a previous failure, the cleanup exception is
+        # logged and not re-raised. Note that since the exception happens
+        # outside of the Tempest test setUp, logging is not captured on the
+        # Tempest test side; it will be captured by the unit test instead.
+ expected_exc = 'sc exploded'
+ clear_exc = 'clear exploded'
+ self.mocks['setup_credentials'].side_effect = Exception(expected_exc)
+ self.mocks['clear_credentials'].side_effect = Exception(clear_exc)
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # If setup_credentials explodes, tearDownClass and clear_credentials
+        # are invoked and the original exception is re-raised
+ self.assertEqual((self.SETUP_FIXTURES[:3] +
+ [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+ self.test.fixtures_invoked)
+ found_exc = log[0][1][1]
+ self.assertIn(expected_exc, str(found_exc))
+ # Since log capture depends on OS_LOG_CAPTURE, we can only assert if
+ # logging was captured
+ if os.environ.get('OS_LOG_CAPTURE'):
+ self.assertIn(clear_exc, self.log_fixture.logger.output)
+
+ def test_skip_credentials_clients_resources_credentials_clear_fails(self):
+ # If cleanup fails with no previous failure, we re-raise the exception.
+ expected_exc = 'clear exploded'
+ self.mocks['clear_credentials'].side_effect = Exception(expected_exc)
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # All setup and teardown fixtures run; with no earlier failure the
+        # clear_credentials exception is re-raised
+ self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+ self.test.fixtures_invoked)
+ found_exc = log[0][1][1]
+ self.assertIn(expected_exc, str(found_exc))
+
+ def test_skip_credentials_clients_fails(self):
+ expected_exc = 'clients exploded'
+ self.mocks['setup_clients'].side_effect = Exception(expected_exc)
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # If setup_clients explodes, tearDownClass and clear_credentials are
+        # invoked and the exception is re-raised
+ self.assertEqual((self.SETUP_FIXTURES[:4] +
+ [self.TEARDOWN_FIXTURES[i] for i in (0, 2)]),
+ self.test.fixtures_invoked)
+ found_exc = log[0][1][1]
+ self.assertIn(expected_exc, str(found_exc))
+
+ def test_skip_credentials_clients_resources_fails(self):
+ expected_exc = 'resource setup exploded'
+ self.mocks['resource_setup'].side_effect = Exception(expected_exc)
+ suite = unittest.TestSuite((self.test,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+        # If resource_setup explodes, tearDownClass, resource_cleanup and
+        # clear_credentials are invoked and the exception is re-raised
+ self.assertEqual(self.SETUP_FIXTURES + self.TEARDOWN_FIXTURES,
+ self.test.fixtures_invoked)
+ found_exc = log[0][1][1]
+ self.assertIn(expected_exc, str(found_exc))
diff --git a/test-requirements.txt b/test-requirements.txt
index 04fd878..37644d0 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,9 +4,9 @@
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
# needed for doc build
sphinx>=1.6.2 # BSD
-openstackdocstheme>=1.11.0 # Apache-2.0
-reno!=2.3.1,>=1.8.0 # Apache-2.0
-mock>=2.0 # BSD
+openstackdocstheme>=1.17.0 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
+mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
flake8-import-order==0.11 # LGPLv3
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 99df0d1..dd05438 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -28,12 +28,12 @@
try:
# For Python 3.0 and later
- from urllib.error import HTTPError as HTTPError
+ from urllib.error import HTTPError
import urllib.request as urllib
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as urllib
- from urllib2 import HTTPError as HTTPError
+ from urllib2 import HTTPError
url = 'https://review.openstack.org/projects/'
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index e6aad86..20c99b2 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -33,8 +33,8 @@
# * network access to https://git.openstack.org/cgit
# ))
#
-# If a file named data/tempest-plugins-registry.header or
-# data/tempest-plugins-registry.footer is found relative to the
+# If a file named doc/source/data/tempest-plugins-registry.header or
+# doc/source/data/tempest-plugins-registry.footer is found relative to the
# current working directory, it will be prepended or appended to
# the generated reStructuredText plugins table respectively.
@@ -43,8 +43,8 @@
(
declare -A plugins
-if [[ -r data/tempest-plugins-registry.header ]]; then
- cat data/tempest-plugins-registry.header
+if [[ -r doc/source/data/tempest-plugins-registry.header ]]; then
+ cat doc/source/data/tempest-plugins-registry.header
fi
sorted_plugins=$(python tools/generate-tempest-plugins-list.py)
@@ -56,8 +56,8 @@
printf "+----------------------------+-------------------------------------------------------------------------+\n"
done
-if [[ -r data/tempest-plugins-registry.footer ]]; then
- cat data/tempest-plugins-registry.footer
+if [[ -r doc/source/data/tempest-plugins-registry.footer ]]; then
+ cat doc/source/data/tempest-plugins-registry.footer
fi
) > doc/source/plugin-registry.rst
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
new file mode 100644
index 0000000..8b4f913
--- /dev/null
+++ b/tools/tempest-plugin-sanity.sh
@@ -0,0 +1,127 @@
+#!/usr/bin/env bash
+
+# Copyright 2017 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script is intended to check the sanity of tempest plugins against
+# tempest master.
+# What it does:
+# * Creates a virtualenv
+# * Installs tempest into it
+# * Retrieves the list of projects that provide a tempest plugin
+# * For each project in the list, it:
+#   * Clones the project
+#   * Installs the project along with the dependencies from its
+#     test-requirements.txt
+#   * Creates a tempest workspace
+#   * Lists the tempest plugins
+#   * Lists the tempest plugin tests
+#   * Uninstalls the project and its dependencies
+#   * Re-installs tempest
+#   * Repeats the cycle from cloning with the next project
+#
+# If one of the steps fails, the script exits with a failure.
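+#
+# Example invocation (this is what the plugin-sanity-check tox env runs):
+#   bash tools/tempest-plugin-sanity.sh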
+
+if [ "$1" == "-h" ]; then
+    echo -e "This script performs a sanity check of tempest plugins to find
+configuration and dependency issues with tempest.\n
+Usage: sh ./tools/tempest-plugin-sanity.sh"
+ exit 0
+fi
+
+set -ex
+
+# retrieve a list of projects having tempest plugins
+PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
+# Projects whose tempest plugins are stale or unmaintained for a long time
+BLACKLIST="trio2o"
+
+# Function to clone project using zuul-cloner or from git
+function clone_project() {
+ if [ -e /usr/zuul-env/bin/zuul-cloner ]; then
+ /usr/zuul-env/bin/zuul-cloner --cache-dir /opt/git \
+ git://git.openstack.org \
+ openstack/"$1"
+
+ elif [ -e /usr/bin/git ]; then
+ /usr/bin/git clone git://git.openstack.org/openstack/"$1" \
+ openstack/"$1"
+
+ fi
+}
+
+# Create virtualenv to perform sanity operation
+SANITY_DIR=$(pwd)
+virtualenv "$SANITY_DIR"/.venv
+export TVENV="$SANITY_DIR/tools/with_venv.sh"
+cd "$SANITY_DIR"
+
+# Install tempest in a venv
+"$TVENV" pip install .
+
+# Function to install project
+function install_project() {
+ "$TVENV" pip install "$SANITY_DIR"/openstack/"$1"
+    # If the project ships a test-requirements.txt file, install it as well.
+ if [ -e "$SANITY_DIR"/openstack/"$1"/test-requirements.txt ]; then
+ "$TVENV" pip install -r "$SANITY_DIR"/openstack/"$1"/test-requirements.txt
+ fi
+}
+
+# Function to perform sanity checking on Tempest plugin
+function tempest_sanity() {
+ "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity
+ cd "$SANITY_DIR"/tempest_sanity
+ "$TVENV" tempest list-plugins
+ "$TVENV" tempest run -l
+ # Delete tempest workspace
+ "$TVENV" tempest workspace remove --name tempest_sanity --rmdir
+ cd "$SANITY_DIR"
+}
+
+# Function to uninstall project
+function uninstall_project() {
+ "$TVENV" pip uninstall -y "$SANITY_DIR"/openstack/"$1"
+    # If the project ships *requirements.txt files, uninstall those packages too.
+ if [ -e "$SANITY_DIR"/openstack/"$1"/*requirements.txt ]; then
+ "$TVENV" pip uninstall -y -r "$SANITY_DIR"/openstack/"$1"/*requirements.txt
+ fi
+ # Remove the project directory after sanity run
+ rm -fr "$SANITY_DIR"/openstack/"$1"
+}
+
+# Function to run sanity check on each project
+function plugin_sanity_check() {
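+    # Run the full clone/install/check/uninstall cycle for one project, then
+    # re-install tempest, since uninstalling the project's requirements may
+    # have removed tempest's own dependencies.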
+ clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
+ && uninstall_project "$1" && "$TVENV" pip install .
+}
+
+# Track pass/fail status for each plugin
+passed_plugin=''
+failed_plugin=''
+# Perform sanity on all tempest plugin projects
+for project in $PROJECT_LIST; do
+    # Skip blacklisted tempest plugins
+ if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
+ plugin_sanity_check $project && passed_plugin+=", $project" || \
+ failed_plugin+=", $project"
+ fi
+done
+
+# Check for failed status
+if [[ -n $failed_plugin ]]; then
+ exit 1
+fi
diff --git a/tox.ini b/tox.ini
index 892f834..21696eb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,11 +16,11 @@
[testenv]
setenv =
VIRTUAL_ENV={envdir}
- OS_TEST_PATH=./tempest/tests
+ OS_LOG_CAPTURE=1
PYTHONWARNINGS=default::DeprecationWarning
BRANCH_NAME=master
CLIENT_NAME=tempest
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH OS_TEST_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
install_command =
{toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
@@ -30,7 +30,7 @@
-r{toxinidir}/test-requirements.txt
commands =
find . -type f -name "*.pyc" -delete
- ostestr {posargs}
+ stestr --test-path ./tempest/tests run {posargs}
[testenv:genconfig]
commands = oslo-config-generator --config-file tempest/cmd/config-generator.tempest.conf
@@ -138,6 +138,7 @@
[testenv:docs]
commands =
+ rm -rf doc/build
python setup.py build_sphinx {posargs}
[testenv:pep8]
@@ -159,12 +160,14 @@
# E129 skipped because it is too limiting when combined with other rules
ignore = E125,E123,E129
show-source = True
-exclude = .git,.venv,.tox,dist,doc,*egg
+exclude = .git,.venv,.tox,dist,doc,*egg,build
enable-extensions = H106,H203,H904
import-order-style = pep8
[testenv:releasenotes]
-commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+commands =
+ rm -rf releasenotes/build
+ sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:pip-check-reqs]
# Do not install test-requirements as that will pollute the virtualenv for
@@ -185,3 +188,9 @@
# separately, outside of the requirements files.
deps = bindep
commands = bindep test
+
+[testenv:plugin-sanity-check]
+# Perform the tempest plugin sanity check
+whitelist_externals = bash
+commands =
+ bash tools/tempest-plugin-sanity.sh
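+# Run this environment locally with: tox -e plugin-sanity-check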