Merge "Delete identity domain must delete its contents"
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..e8b258d
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,192 @@
+- job:
+ name: devstack-tempest
+ parent: devstack
+ description: Base Tempest job.
+ required-projects:
+ - openstack/tempest
+ timeout: 7200
+ roles:
+ - zuul: openstack-dev/devstack
+ vars:
+ devstack_services:
+ tempest: true
+ test_results_stage_name: 'test_results'
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/tempest/etc/tempest.conf': 'logs'
+ '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': 'logs'
+ '{{ devstack_base_dir }}/tempest/tempest.log': 'logs'
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': 'logs'
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': 'logs'
+ '{{ stage_dir }}/stackviz': 'logs'
+ extensions_to_txt:
+ conf: True
+ log: True
+ yaml: True
+ yml: True
+ run: playbooks/devstack-tempest.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: tempest-full
+ parent: devstack-tempest
+ # This currently works from stable/queens on.
+ branches:
+ - master
+ - stable/queens
+ description: |
+ Base integration test with Neutron networking and py27.
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-full
+ * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+
+- job:
+ name: tempest-full-py3
+ parent: devstack-tempest
+ branches:
+ - master
+ - stable/queens
+ description: |
+ Base integration test with Neutron networking and py3.
+ Former names for this job were:
+ * legacy-tempest-dsvm-py35
+ * gate-tempest-dsvm-py35
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: True
+ FORCE_CONFIG_DRIVE: True
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
+ name: tempest-full-queens
+ parent: tempest-full
+ override-checkout: stable/queens
+
+- job:
+ name: tempest-full-queens-py3
+ parent: tempest-full-py3
+ override-checkout: stable/queens
+
+- job:
+ name: tempest-tox-plugin-sanity-check
+ parent: tox
+ description: |
+ Run tempest plugin sanity check script using tox.
+ nodeset: ubuntu-xenial
+ vars:
+ tox_envlist: plugin-sanity-check
+ voting: false
+ timeout: 5000
+ required-projects:
+ - openstack/almanach
+ - openstack/aodh
+ - openstack/barbican-tempest-plugin
+ - openstack/ceilometer
+ - openstack/cinder
+ - openstack/congress
+ - openstack/designate-tempest-plugin
+ - openstack/ec2-api
+ - openstack/freezer
+ - openstack/freezer-api
+ - openstack/freezer-tempest-plugin
+ - openstack/gce-api
+ - openstack/glare
+ - openstack/heat
+ - openstack/intel-nfv-ci-tests
+ - openstack/ironic
+ - openstack/ironic-inspector
+ - openstack/keystone-tempest-plugin
+ - openstack/kingbird
+ - openstack/kuryr-tempest-plugin
+ - openstack/magnum
+ - openstack/magnum-tempest-plugin
+ - openstack/manila
+ - openstack/manila-tempest-plugin
+ - openstack/mistral
+ - openstack/mogan
+ - openstack/monasca-api
+ - openstack/monasca-log-api
+ - openstack/murano
+ - openstack/networking-bgpvpn
+ - openstack/networking-cisco
+ - openstack/networking-fortinet
+ - openstack/networking-generic-switch
+ - openstack/networking-l2gw
+ - openstack/networking-midonet
+ - openstack/networking-plumgrid
+ - openstack/networking-sfc
+ - openstack/neutron
+ - openstack/neutron-dynamic-routing
+ - openstack/neutron-fwaas
+ - openstack/neutron-lbaas
+ - openstack/neutron-tempest-plugin
+ - openstack/neutron-vpnaas
+ - openstack/nova-lxd
+ - openstack/novajoin-tempest-plugin
+ - openstack/octavia-tempest-plugin
+ - openstack/oswin-tempest-plugin
+ - openstack/panko
+ - openstack/patrole
+ - openstack/qinling
+ - openstack/requirements
+ - openstack/sahara-tests
+ - openstack/senlin
+ - openstack/senlin-tempest-plugin
+ - openstack/tap-as-a-service
+ - openstack/tempest-horizon
+ - openstack/trio2o
+ - openstack/trove
+ - openstack/valet
+ - openstack/vitrage
+ - openstack/vmware-nsx-tempest-plugin
+ - openstack/watcher-tempest-plugin
+ - openstack/zaqar-tempest-plugin
+ - openstack/zun-tempest-plugin
+
+- project:
+ check:
+ jobs:
+ - devstack-tempest:
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
+ - nova-multiattach
+ - tempest-full-queens:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - tempest-full-queens-py3:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - tempest-tox-plugin-sanity-check
+ gate:
+ jobs:
+ - nova-multiattach
+ periodic-stable:
+ jobs:
+ - tempest-full-queens
+ - tempest-full-queens-py3
diff --git a/HACKING.rst b/HACKING.rst
index 8407734..f961884 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -9,14 +9,14 @@
------------------------------
- [T102] Cannot import OpenStack python clients in tempest/api &
- tempest/scenario tests
+ tempest/scenario tests
- [T104] Scenario tests require a services decorator
- [T105] Tests cannot use setUpClass/tearDownClass
- [T106] vim configuration should not be kept in source files.
- [T107] Check that a service tag isn't in the module path
- [T108] Check no hyphen at the end of rand_name() argument
- [T109] Cannot use testtools.skip decorator; instead use
- decorators.skip_because from tempest.lib
+ decorators.skip_because from tempest.lib
- [T110] Check that service client names of GET should be consistent
- [T111] Check that service client names of DELETE should be consistent
- [T112] Check that tempest.lib should not import local tempest code
@@ -84,7 +84,7 @@
It is recommended to use testtools `matcher`_ for the more tricky assertions.
You can implement your own specific `matcher`_ as well.
-.. _matcher: http://testtools.readthedocs.org/en/latest/for-test-authors.html#matchers
+.. _matcher: https://testtools.readthedocs.org/en/latest/for-test-authors.html#matchers
If the test case fails you can see the related logs and the information
carried by the exception (exception class, backtrack and exception info).
@@ -121,38 +121,38 @@
Test fixtures and resources
---------------------------
Test level resources should be cleaned-up after the test execution. Clean-up
-is best scheduled using `addCleanup` which ensures that the resource cleanup
+is best scheduled using ``addCleanup`` which ensures that the resource cleanup
code is always invoked, and in reverse order with respect to the creation
order.
-Test class level resources should be defined in the `resource_setup` method of
-the test class, except for any credential obtained from the credentials
-provider, which should be set-up in the `setup_credentials` method.
-Cleanup is best scheduled using `addClassResourceCleanup` which ensures that
+Test class level resources should be defined in the ``resource_setup`` method
+of the test class, except for any credential obtained from the credentials
+provider, which should be set-up in the ``setup_credentials`` method.
+Cleanup is best scheduled using ``addClassResourceCleanup`` which ensures that
the cleanup code is always invoked, and in reverse order with respect to the
creation order.
In both cases - test level and class level cleanups - a wait loop should be
scheduled before the actual delete of resources with an asynchronous delete.
-The test base class `BaseTestCase` defines Tempest framework for class level
-fixtures. `setUpClass` and `tearDownClass` are defined here and cannot be
+The test base class ``BaseTestCase`` defines the Tempest framework for class level
+fixtures. ``setUpClass`` and ``tearDownClass`` are defined here and cannot be
overwritten by subclasses (enforced via hacking rule T105).
Set-up is split in a series of steps (setup stages), which can be overwritten
by test classes. Set-up stages are:
-- `skip_checks`
-- `setup_credentials`
-- `setup_clients`
-- `resource_setup`
+- ``skip_checks``
+- ``setup_credentials``
+- ``setup_clients``
+- ``resource_setup``
Tear-down is also split in a series of steps (teardown stages), which are
stacked for execution only if the corresponding setup stage had been
reached during the setup phase. Tear-down stages are:
-- `clear_credentials` (defined in the base test class)
-- `resource_cleanup`
+- ``clear_credentials`` (defined in the base test class)
+- ``resource_cleanup``
Skipping Tests
--------------
@@ -178,7 +178,7 @@
All negative tests should be based on `API-WG guideline`_ . Such negative
tests can block any changes from accurate failure code to invalid one.
-.. _API-WG guideline: http://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications
+.. _API-WG guideline: https://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications
If facing some gray area which is not clarified on the above guideline, propose
a new guideline to the API-WG. With a proposal to the API-WG we will be able to
@@ -385,7 +385,7 @@
Otherwise the bug fix won't be able to land in the project.
-Handily, `Zuul’s cross-repository dependencies
+Handily, `Zuul's cross-repository dependencies
<https://docs.openstack.org/infra/zuul/user/gating.html#cross-project-dependencies>`_.
can be leveraged to do without step 2 and to have steps 3 and 4 happen
"atomically". To do that, make the patch written in step 1 to depend (refer to
diff --git a/README.rst b/README.rst
index 17d4cba..242f4d5 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
Team and repository tags
========================
-.. image:: http://governance.openstack.org/badges/tempest.svg
+.. image:: https://governance.openstack.org/badges/tempest.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
@@ -61,7 +61,7 @@
#. You first need to install Tempest. This is done with pip after you check out
the Tempest repo::
- $ git clone http://git.openstack.org/openstack/tempest
+ $ git clone https://git.openstack.org/openstack/tempest
$ pip install tempest/
This can be done within a venv, but the assumption for this guide is that
@@ -133,7 +133,7 @@
Release Versioning
------------------
-`Tempest Release Notes <http://docs.openstack.org/releasenotes/tempest>`_
+`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
shows what changes have been released on each version.
Tempest's released versions are broken into 2 sets of information. Depending on
@@ -183,16 +183,35 @@
Tempest also has a set of unit tests which test the Tempest code itself. These
tests can be run by specifying the test discovery path::
- $ OS_TEST_PATH=./tempest/tests testr run --parallel
+ $ stestr --test-path ./tempest/tests run
-By setting OS_TEST_PATH to ./tempest/tests it specifies that test discover
-should only be run on the unit test directory. The default value of OS_TEST_PATH
-is OS_TEST_PATH=./tempest/test_discover which will only run test discover on the
+By setting the ``--test-path`` option to ./tempest/tests it specifies that test
+discovery should only be run on the unit test directory. The default value of
+``test_path`` is ``test_path=./tempest/test_discover`` which will only run test discovery on the
Tempest suite.
Alternatively, there are the py27 and py35 tox jobs which will run the unit
tests with the corresponding version of python.
+One common activity is to run a single test; you can do this with tox
+simply by running the py27 or py35 job against a single test::
+
+ $ tox -e py27 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+
+Or all tests in the test_microversions.py file::
+
+ $ tox -e py27 -- -n tempest.tests.test_microversions
+
+You may also use regular expressions to run any matching tests::
+
+ $ tox -e py27 -- test_microversions
+
+Additionally, when running a single test, or test-file, the ``-n/--no-discover``
+argument is no longer required; however, it may perform faster if included.
+
+For more information on these options and details about stestr, please see the
+`stestr documentation <http://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+
Python 2.6
----------
diff --git a/REVIEWING.rst b/REVIEWING.rst
index 7d28320..766d0c6 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -2,7 +2,7 @@
======================
To start read the `OpenStack Common Review Checklist
-<http://docs.openstack.org/infra/manual/developers.html#peer-review>`_
+<https://docs.openstack.org/infra/manual/developers.html#peer-review>`_
Ensuring code is executed
@@ -16,7 +16,7 @@
If a new test is added that depends on a new config option (like a feature
flag), the commit message must reference a change in DevStack or DevStack-Gate
that enables the execution of this newly introduced test. This reference could
-either be a `Cross-Repository Dependency <http://docs.openstack.org/infra/
+either be a `Cross-Repository Dependency <https://docs.openstack.org/infra/
manual/developers.html#cross-repository-dependencies>`_ or a simple link
to a Gerrit review.
@@ -121,8 +121,8 @@
When to approve
---------------
- * Every patch needs two +2s before being approved.
- * Its ok to hold off on an approval until a subject matter expert reviews it
- * If a patch has already been approved but requires a trivial rebase to merge,
- you do not have to wait for a second +2, since the patch has already had
- two +2s.
+* Every patch needs two +2s before being approved.
+* It's ok to hold off on an approval until a subject matter expert reviews it
+* If a patch has already been approved but requires a trivial rebase to merge,
+ you do not have to wait for a second +2, since the patch has already had
+ two +2s.
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..597b54e
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+openstackdocstheme>=1.18.1 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
+sphinx!=1.6.6,>=1.6.2 # BSD
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
new file mode 100644
index 0000000..7745594
--- /dev/null
+++ b/doc/source/_extra/.htaccess
@@ -0,0 +1 @@
+redirectmatch 301 ^/developer/tempest/(.*) /tempest/latest/$1
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 067eb81..c2ea628 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -64,7 +64,7 @@
# openstackdocstheme options
repository_name = 'openstack/tempest'
bug_project = 'tempest'
-bug_tag = ''
+bug_tag = 'doc'
# Must set this variable to include year, month, day, hours, and minutes.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
@@ -154,6 +154,9 @@
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+# Add any paths that contain "extra" files, such as .htaccess or
+# robots.txt.
+html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 8f2865a..d0d7320 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -17,10 +17,10 @@
Tempest allows for configuring a set of admin credentials in the ``auth``
section, via the following parameters:
- #. ``admin_username``
- #. ``admin_password``
- #. ``admin_project_name``
- #. ``admin_domain_name``
+#. ``admin_username``
+#. ``admin_password``
+#. ``admin_project_name``
+#. ``admin_domain_name``
Admin credentials are not mandatory to run Tempest, but when provided they
can be used to:
@@ -47,9 +47,9 @@
to provide it with information about how it communicates with keystone.
This involves configuring the following options in the ``identity`` section:
- - ``auth_version``
- - ``uri``
- - ``uri_v3``
+- ``auth_version``
+- ``uri``
+- ``uri_v3``
The ``auth_version`` option is used to tell Tempest whether it should be using
Keystone's v2 or v3 api for communicating with Keystone. The two uri options are
@@ -74,12 +74,12 @@
an admin user, and an alternate user. To enable and use dynamic credentials you
only need to configure two things:
- #. A set of admin credentials with permissions to create users and
- projects. This is specified in the ``auth`` section with the
- ``admin_username``, ``admin_project_name``, ``admin_domain_name`` and
- ``admin_password`` options
- #. To enable dynamic credentials in the ``auth`` section with the
- ``use_dynamic_credentials`` option.
+#. A set of admin credentials with permissions to create users and
+ projects. This is specified in the ``auth`` section with the
+ ``admin_username``, ``admin_project_name``, ``admin_domain_name`` and
+ ``admin_password`` options
+#. To enable dynamic credentials in the ``auth`` section with the
+ ``use_dynamic_credentials`` option.
This is also currently the default credential provider enabled by Tempest, due
to its common use and ease of configuration.
@@ -115,21 +115,21 @@
To enable and use locking test accounts you need do a few things:
- #. Create an accounts.yaml file which contains the set of pre-existing
- credentials to use for testing. To make sure you don't have a credentials
- starvation issue when running in parallel make sure you have at least two
- times the number of worker processes you are using to execute Tempest
- available in the file. (If running serially the worker count is 1.)
+#. Create an accounts.yaml file which contains the set of pre-existing
+ credentials to use for testing. To make sure you don't have a credentials
+ starvation issue when running in parallel make sure you have at least two
+ times the number of worker processes you are using to execute Tempest
+ available in the file. (If running serially the worker count is 1.)
- You can check the accounts.yaml.sample file packaged in Tempest for the yaml
- format.
- #. Provide Tempest with the location of your accounts.yaml file with the
- ``test_accounts_file`` option in the ``auth`` section
+ You can check the accounts.yaml.sample file packaged in Tempest for the yaml
+ format.
+#. Provide Tempest with the location of your accounts.yaml file with the
+ ``test_accounts_file`` option in the ``auth`` section
- *NOTE: Be sure to use a full path for the file; otherwise Tempest will
- likely not find it.*
+ *NOTE: Be sure to use a full path for the file; otherwise Tempest will
+ likely not find it.*
- #. Set ``use_dynamic_credentials = False`` in the ``auth`` group
+#. Set ``use_dynamic_credentials = False`` in the ``auth`` group
It is worth pointing out that each set of credentials in the accounts.yaml
should have a unique project. This is required to provide proper isolation
@@ -162,8 +162,8 @@
can use to boot the servers with. There are two options in the Tempest config
for doing this:
- #. ``flavor_ref``
- #. ``flavor_ref_alt``
+#. ``flavor_ref``
+#. ``flavor_ref_alt``
Both of these options are in the ``compute`` section of the config file and take
in the flavor id (not the name) from Nova. The ``flavor_ref`` option is what
@@ -181,8 +181,8 @@
Just like with flavors, Tempest needs to know which images to use for booting
servers. There are two options in the compute section just like with flavors:
- #. ``image_ref``
- #. ``image_ref_alt``
+#. ``image_ref``
+#. ``image_ref_alt``
Both options are expecting an image id (not name) from Nova. The ``image_ref``
option is what will be used for booting the majority of servers in Tempest.
@@ -192,13 +192,13 @@
There are also options in the ``scenario`` section for images:
- #. ``img_file``
- #. ``img_dir``
- #. ``aki_img_file``
- #. ``ari_img_file``
- #. ``ami_img_file``
- #. ``img_container_format``
- #. ``img_disk_format``
+#. ``img_file``
+#. ``img_dir``
+#. ``aki_img_file``
+#. ``ari_img_file``
+#. ``ami_img_file``
+#. ``img_container_format``
+#. ``img_disk_format``
However, unlike the other image options, these are used for a very small subset
of scenario tests which are uploading an image. These options are used to tell
@@ -261,7 +261,7 @@
To set a fixed network name simply:
- #. Set the ``fixed_network_name`` option in the ``compute`` group
+#. Set the ``fixed_network_name`` option in the ``compute`` group
In the case that the configured fixed network name can not be found by a user
network list call, it will be treated like one was not provided except that a
@@ -329,9 +329,9 @@
To enable remote access to servers, there are 3 options at a minimum that are used:
- #. ``run_validation``
- #. ``connect_method``
- #. ``auth_method``
+#. ``run_validation``
+#. ``connect_method``
+#. ``auth_method``
The ``run_validation`` is used to enable or disable ssh connectivity for
all tests (with the exception of scenario tests which do not have a flag for
@@ -370,9 +370,9 @@
service catalog. There are three options for each service section to accomplish
this:
- #. ``catalog_type``
- #. ``endpoint_type``
- #. ``region``
+#. ``catalog_type``
+#. ``endpoint_type``
+#. ``region``
Setting ``catalog_type`` and ``endpoint_type`` should normally give Tempest
enough information to determine which endpoint it should pull from the service
@@ -400,7 +400,7 @@
Examples:
* Good - ``http://example.com:1234/v2.0``
- * Wouldn’t work - ``http://example.com:1234/xyz/v2.0/``
+ * Wouldn't work - ``http://example.com:1234/xyz/v2.0/``
(adding prefix/suffix around version etc)
Service Feature Configuration
diff --git a/doc/source/library.rst b/doc/source/library.rst
index 074d642..14415ae 100644
--- a/doc/source/library.rst
+++ b/doc/source/library.rst
@@ -44,9 +44,9 @@
existing interfaces we have to be careful to make sure we don't break any
external consumers. Some common red flags are:
- * a change to an existing API requires a change outside the library directory
- where the interface is being consumed
- * a unit test has to be significantly changed to make the proposed change pass
+* a change to an existing API requires a change outside the library directory
+ where the interface is being consumed
+* a unit test has to be significantly changed to make the proposed change pass
Testing
'''''''
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
index d96c97a..d25f85c 100644
--- a/doc/source/library/credential_providers.rst
+++ b/doc/source/library/credential_providers.rst
@@ -49,7 +49,7 @@
public_network_id=CONF.network.public_network_id,
create_networks=(CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network),
- resource_prefix=CONF.resources_prefix,
+ resource_prefix='tempest',
credentials_domain=CONF.auth.default_credentials_domain_name,
admin_role=CONF.identity.admin_role,
identity_uri=CONF.identity.uri_v3,
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index acf5593..942f969 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -33,46 +33,46 @@
Tempest will cover only integration testing of applicable microversions with
below exceptions:
- #. Test covers a feature which is important for interoperability. This covers tests requirement
- from Defcore.
- #. Test needed to fill Schema gaps.
- Tempest validates API responses with defined JSON schema. API responses can be different on
- each microversion and the JSON schemas need to be defined separately for the microversion.
- While implementing new integration tests for a specific microversion, there
- may be a gap in the JSON schemas (caused by previous microversions) implemented
- in Tempest.
- Filling that gap while implementing the new integration test cases is not efficient due to
- many reasons:
+#. Test covers a feature which is important for interoperability. This covers test
+ requirements from Defcore.
+#. Test needed to fill Schema gaps.
+ Tempest validates API responses with defined JSON schema. API responses can be different on
+ each microversion and the JSON schemas need to be defined separately for the microversion.
+ While implementing new integration tests for a specific microversion, there
+ may be a gap in the JSON schemas (caused by previous microversions) implemented
+ in Tempest.
+ Filling that gap while implementing the new integration test cases is not efficient due to
+ many reasons:
- * Hard to review
- * Sync between multiple integration tests patches which try to fill the same schema gap at same
- time
- * Might delay the microversion change on project side where project team wants Tempest
- tests to verify the results.
+ * Hard to review
+ * Sync between multiple integration test patches which try to fill the same schema gap at the
+ same time
+ * Might delay the microversion change on project side where project team wants Tempest
+ tests to verify the results.
- Tempest will allow to fill the schema gaps at the end of each cycle, or more
- often if required.
- Schema gap can be filled with testing those with a minimal set of tests. Those
- tests might not be integration tests and might be already covered on project
- side also.
- This exception is needed because:
+ Tempest will allow filling the schema gaps at the end of each cycle, or more
+ often if required.
+ Schema gaps can be filled by testing them with a minimal set of tests. Those
+ tests might not be integration tests and might already be covered on the
+ project side as well.
+ This exception is needed because:
- * Allow to create microversion response schema in Tempest at the same time that projects are
- implementing their API microversions. This will make implementation easier for adding
- required tests before a new microversion change can be merged in the corresponding project
- and hence accelerate the development of microversions.
- * New schema must be verified by at least one test case which exercises such schema.
+ * Allow creating microversion response schemas in Tempest at the same time that projects are
+ implementing their API microversions. This will make it easier to add the
+ required tests before a new microversion change can be merged in the corresponding project
+ and hence accelerate the development of microversions.
+ * A new schema must be verified by at least one test case which exercises that schema.
- For example:
- If any projects implemented 4 API microversion say- v2.3, v2.4, v2.5, v2.6
- Assume microversion v2.3, v2.4, v2.6 change the API Response which means Tempest
- needs to add JSON schema for v2.3, v2.4, v2.6.
- In that case if only 1 or 2 tests can verify all new schemas then we do not need
- separate tests for each new schemas. In worst case, we have to add 3 separate tests.
- #. Test covers service behavior at large scale with involvement of more deep layer like hypervisor
- etc not just API/DB layer. This type of tests will be added case by case basis and
- with project team consultation about why it cannot be covered on project side and worth to test
- in Tempest.
+ For example:
+ if a project implemented 4 API microversions, say v2.3, v2.4, v2.5 and v2.6, and
+ microversions v2.3, v2.4 and v2.6 changed the API response, then Tempest
+ would need to add JSON schemas for v2.3, v2.4 and v2.6.
+ In that case, if only 1 or 2 tests can verify all of the new schemas, we do not need
+ separate tests for each new schema. In the worst case, we have to add 3 separate tests.
+#. Test covers service behavior at large scale, involving deeper layers such as the hypervisor,
+ not just the API/DB layer. This type of test will be added on a case by case basis, in
+ consultation with the project team about why it cannot be covered on the project side and is
+ worth testing in Tempest.
Project Scope For Microversion Testing
""""""""""""""""""""""""""""""""""""""
@@ -294,72 +294,88 @@
* Compute
- * `2.1`_
+ * `2.1`_
- .. _2.1: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id1
+ .. _2.1: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id1
- * `2.2`_
+ * `2.2`_
- .. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
+ .. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
- * `2.10`_
+ * `2.6`_
- .. _2.10: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id9
+ .. _2.6: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id5
- * `2.20`_
+ * `2.10`_
- .. _2.20: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id18
+ .. _2.10: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id9
- * `2.25`_
+ * `2.20`_
- .. _2.25: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-mitaka
+ .. _2.20: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id18
- * `2.32`_
+ * `2.21`_
- .. _2.32: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id29
+ .. _2.21: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id19
- * `2.37`_
+ * `2.25`_
- .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
+ .. _2.25: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-mitaka
- * `2.42`_
+ * `2.32`_
- .. _2.42: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-ocata
+ .. _2.32: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id29
- * `2.47`_
+ * `2.37`_
- .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+ .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
- * `2.48`_
+ * `2.42`_
- .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+ .. _2.42: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-ocata
+
+ * `2.47`_
+
+ .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+
+ * `2.48`_
+
+ .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+
+ * `2.60`_
+
+ .. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54
* Volume
- * `3.3`_
+ * `3.3`_
- .. _3.3: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id3
+ .. _3.3: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id3
- * `3.9`_
+ * `3.9`_
- .. _3.9: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id9
+ .. _3.9: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id9
- * `3.11`_
+ * `3.11`_
- .. _3.11: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id11
+ .. _3.11: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id11
- * `3.12`_
+ * `3.12`_
- .. _3.12: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id12
+ .. _3.12: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id12
- * `3.14`_
+ * `3.13`_
- .. _3.14: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id14
+ .. _3.13: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id13
- * `3.19`_
+ * `3.14`_
- .. _3.19: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id18
+ .. _3.14: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id14
- * `3.20`_
+ * `3.19`_
- .. _3.20: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id19
+ .. _3.19: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id18
+
+ * `3.20`_
+
+ .. _3.20: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id19
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 77ef9ed..6f6621d 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -29,6 +29,8 @@
* tempest.config
* tempest.test_discover.plugins
* tempest.common.credentials_factory
+* tempest.clients
+* tempest.test
If there is an interface from tempest that you need to rely on in your plugin
which is not listed above, it likely needs to be migrated to tempest.lib. In
@@ -130,7 +132,7 @@
Plugin Structure
================
-While there are no hard and fast rules for the structure a plugin, there are
+While there are no hard and fast rules for the structure of a plugin, there are
basically no constraints on what the plugin looks like as long as the 2 steps
above are done. However, there are some recommended patterns to follow to make
it easy for people to contribute and work with your plugin. For example, if you
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index 07c3046..ddae6e2 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -29,19 +29,19 @@
In the proposal etherpad we'll be looking for answers to 3 questions
- #. The tests proposed for removal must have equiv. coverage in a different
- project's test suite (whether this is another gating test project, or an in
- tree functional test suite). For API tests preferably the other project will
- have a similar source of friction in place to prevent breaking api changes
- so that we don't regress and let breaking api changes slip through the
- gate.
- #. The test proposed for removal has a failure rate < 0.50% in the gate over
- the past release (the value and interval will likely be adjusted in the
- future)
+#. The tests proposed for removal must have equivalent coverage in a different
+ project's test suite (whether this is another gating test project, or an
+ in-tree functional test suite). For API tests, preferably the other project will
+ have a similar source of friction in place to prevent breaking API changes,
+ so that we don't regress and let breaking API changes slip through the
+ gate.
+#. The test proposed for removal has a failure rate < 0.50% in the gate over
+ the past release (the value and interval will likely be adjusted in the
+ future)
- .. _`prong #3`:
- #. There must not be an external user/consumer of tempest
- that depends on the test proposed for removal
+ .. _`prong #3`:
+#. There must not be an external user/consumer of tempest
+ that depends on the test proposed for removal
The answers to 1 and 2 are easy to verify. For 1 just provide a link to the new
test location. If you are linking to the tempest removal patch please also put
@@ -62,29 +62,34 @@
The Old Way using subunit2sql directly
""""""""""""""""""""""""""""""""""""""
-SELECT * from tests where test_id like "%test_id%";
-(where $test_id is the full test_id, but truncated to the class because of
-setUpClass or tearDownClass failures)
+``SELECT * from tests where test_id like "%test_id%";``
+(where ``$test_id`` is the full test_id, but truncated to the class because of
+``setUpClass`` or ``tearDownClass`` failures)
You can access the infra mysql subunit2sql db w/ read-only permissions with:
- * hostname: logstash.openstack.org
- * username: query
- * password: query
- * db_name: subunit2sql
+* hostname: logstash.openstack.org
+* username: query
+* password: query
+* db_name: subunit2sql
For example if you were trying to remove the test with the id:
-tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON.test_get_flavor_details_for_deleted_flavor
+``tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON.test_get_flavor_details_for_deleted_flavor``
you would run the following:
- #. run: "mysql -u query -p -h logstash.openstack.org subunit2sql" to connect
- to the subunit2sql db
- #. run the query: MySQL [subunit2sql]> select * from tests where test_id like
- "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
- which will return a table of all the tests in the class (but it will also
- catch failures in setUpClass and tearDownClass)
- #. paste the output table with numbers and the mysql command you ran to
- generate it into the etherpad.
+#. run the command: ``mysql -u query -p -h logstash.openstack.org subunit2sql``
+ to connect to the subunit2sql db
+#. run the query:
+
+ .. code-block:: console
+
+ MySQL [subunit2sql]> select * from tests where test_id like \
+ "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
+
+ which will return a table of all the tests in the class (but it will also
+ catch failures in ``setUpClass`` and ``tearDownClass``)
+#. paste the output table with numbers and the mysql command you ran to
+ generate it into the etherpad.
Eventually a cli interface will be created to make that a bit more friendly.
Also a dashboard is in the works so we don't need to manually run the command.
@@ -131,23 +136,23 @@
For the most part all tempest test removals have to go through this procedure
there are a couple of exceptions though:
- #. The class of testing has been decided to be outside the scope of tempest.
- #. A revert for a patch which added a broken test, or testing which didn't
- actually run in the gate (basically any revert for something which
- shouldn't have been added)
- #. Tests that would become out of scope as a consequence of an API change,
- as described in `API Compatibility`_.
- Such tests cannot live in Tempest because of the branchless nature of
- Tempest. Such test must still honor `prong #3`_.
+#. The class of testing has been decided to be outside the scope of tempest.
+#. A revert for a patch which added a broken test, or testing which didn't
+ actually run in the gate (basically any revert for something which
+ shouldn't have been added)
+#. Tests that would become out of scope as a consequence of an API change,
+ as described in `API Compatibility`_.
+ Such tests cannot live in Tempest because of the branchless nature of
+ Tempest. Such tests must still honor `prong #3`_.
For the first exception type the only types of testing in tree which have been
declared out of scope at this point are:
- * The CLI tests (which should be completely removed at this point)
- * Neutron Adv. Services testing (which should be completely removed at this
- point)
- * XML API Tests (which should be completely removed at this point)
- * EC2 API/boto tests (which should be completely removed at this point)
+* The CLI tests (which should be completely removed at this point)
+* Neutron Adv. Services testing (which should be completely removed at this
+ point)
+* XML API Tests (which should be completely removed at this point)
+* EC2 API/boto tests (which should be completely removed at this point)
For tests that fit into this category the only criteria for removal is that
there is equivalent testing elsewhere.
@@ -159,12 +164,12 @@
are defined as in scope for direct testing in tempest. As of today that list
is:
- * Keystone
- * Nova
- * Glance
- * Cinder
- * Neutron
- * Swift
+* Keystone
+* Nova
+* Glance
+* Cinder
+* Neutron
+* Swift
anything that lives in tempest which doesn't test one of these projects can be
removed assuming there is equivalent testing elsewhere. Preferably using the
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 5a2876e..fff2405 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -36,12 +36,12 @@
In standard unittest the lifecycle of a TestCase can be described in the
following phases:
- #. setUpClass
- #. setUp
- #. Test Execution
- #. tearDown
- #. doCleanups
- #. tearDownClass
+#. setUpClass
+#. setUp
+#. Test Execution
+#. tearDown
+#. doCleanups
+#. tearDownClass
setUpClass
----------
@@ -54,18 +54,18 @@
To accomplish this you do **not** define a setUpClass function, instead there
are a number of predefined phases to setUpClass that are used. The phases are:
- * skip_checks
- * setup_credentials
- * setup_clients
- * resource_setup
+* skip_checks
+* setup_credentials
+* setup_clients
+* resource_setup
which is executed in that order. Cleanup of resources provisioned during
the resource_setup must be scheduled right after provisioning using
-the addClassResourceCleanp helper. The resource cleanups stacked this way
+the addClassResourceCleanup helper. The resource cleanups stacked this way
are executed in reverse order during tearDownClass, before the cleanup of
test credentials takes place. An example of a TestCase which defines all
of these would be::
-
+
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
new file mode 100644
index 0000000..a684984
--- /dev/null
+++ b/playbooks/devstack-tempest.yaml
@@ -0,0 +1,14 @@
+# Changes that run through devstack-tempest are likely to have an impact on
+# the devstack part of the job, so we keep devstack in the main play to
+# avoid zuul retrying on legitimate failures.
+- hosts: all
+ roles:
+ - run-devstack
+
+# We run tests only on one node, regardless how many nodes are in the system
+- hosts: tempest
+ roles:
+ - setup-tempest-run-dir
+ - setup-tempest-data-dir
+ - acl-devstack-files
+ - run-tempest
diff --git a/playbooks/post-tempest.yaml b/playbooks/post-tempest.yaml
new file mode 100644
index 0000000..ab7a1bb
--- /dev/null
+++ b/playbooks/post-tempest.yaml
@@ -0,0 +1,7 @@
+- hosts: all
+ become: true
+ roles:
+ - role: process-test-results
+ test_results_dir: '{{ devstack_base_dir }}/tempest'
+ tox_envdir: tempest
+ - role: process-stackviz
diff --git a/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
new file mode 100644
index 0000000..87a6af9
--- /dev/null
+++ b/releasenotes/notes/add-domain-param-in-cliclient-a270fcf35c8f09e6.yaml
@@ -0,0 +1,17 @@
+---
+fixes:
+ - |
+ Allow specifying new domain parameters:
+
+ * `user_domain_name`
+ * `user_domain_id`
+ * `project_domain_name`
+ * `project_domain_id`
+
+ for the CLIClient class, whose values will be passed to
+ ``--os-user-domain-name``, ``--os-user-domain-id``,
+ ``--os-project-domain-name`` and ``--os-project-domain-id`` respectively
+ during command execution.
+
+ This helps prevent possible test failures with authentication against
+ Keystone v3. Bug: #1719687
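
A rough usage sketch of the new parameters; the endpoint and credential values are placeholders, and the exact keyword names should be checked against ``tempest.lib.cli.base.CLIClient``::

    from tempest.lib.cli import base as cli_base

    # Placeholder credentials/endpoint; in real runs they come from tempest.conf.
    cli = cli_base.CLIClient(
        username='demo',
        password='secretpassword',
        tenant_name='demo',
        uri='https://keystone.example.com/identity/v3',
        user_domain_name='Default',       # new parameter
        project_domain_name='Default')    # new parameter

    # The domain values are passed as --os-user-domain-name and
    # --os-project-domain-name on the generated command line.
    output = cli.openstack('token issue')
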
diff --git a/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml b/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml
new file mode 100644
index 0000000..404319d
--- /dev/null
+++ b/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Add group type specs APIs to v3 group_types_client library.
+
+ * create_or_update_group_type_specs
+ * list_group_type_specs
+ * show_group_type_specs_item
+ * update_group_type_specs_item
+ * delete_group_type_specs_item
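
A hedged sketch of how the new calls might be exercised; the argument shapes (group type id, spec key/value dictionaries) are assumptions to be verified against the client's signatures::

    def exercise_group_type_specs(group_types_client, group_type_id):
        # Create or update a spec, then read it back through the rest of
        # the new API surface before deleting it again.
        specs = {'consistent_group_snapshot_enabled': '<is> True'}
        group_types_client.create_or_update_group_type_specs(
            group_type_id, specs)
        all_specs = group_types_client.list_group_type_specs(group_type_id)
        one_spec = group_types_client.show_group_type_specs_item(
            group_type_id, 'consistent_group_snapshot_enabled')
        group_types_client.update_group_type_specs_item(
            group_type_id, 'consistent_group_snapshot_enabled',
            {'consistent_group_snapshot_enabled': '<is> False'})
        group_types_client.delete_group_type_specs_item(
            group_type_id, 'consistent_group_snapshot_enabled')
        return all_specs, one_spec
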
diff --git a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
new file mode 100644
index 0000000..403bbad
--- /dev/null
+++ b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds a new cli option to tempest run, --load-list <list-file>
+ to specify target tests to run from a list-file. The list-file
+ supports the output format of the tempest run --list-tests
+ command.
diff --git a/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml b/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml
new file mode 100644
index 0000000..6efe7e6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show default quotas API to network quotas_client library.
+ This makes it possible to show the default network quotas for
+ a specified project.
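
A small sketch of the new call; the response layout assumed in the comment is an assumption::

    def default_network_quotas(quotas_client, project_id):
        # show_default_quotas returns the default quota set for the project;
        # the body is assumed to carry a 'quota' dict of per-resource limits.
        body = quotas_client.show_default_quotas(project_id)
        return body['quota']
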
diff --git a/releasenotes/notes/add-show-encryption-specs-item-api-to-v2-encryption-types-client-290b421cd4bc0c0e.yaml b/releasenotes/notes/add-show-encryption-specs-item-api-to-v2-encryption-types-client-290b421cd4bc0c0e.yaml
new file mode 100644
index 0000000..9e13afc
--- /dev/null
+++ b/releasenotes/notes/add-show-encryption-specs-item-api-to-v2-encryption-types-client-290b421cd4bc0c0e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show encryption specs item API to v2 encryption_types_client library.
+ This makes it possible to show a specific encryption spec item for
+ a volume type.
diff --git a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
new file mode 100644
index 0000000..406e282
--- /dev/null
+++ b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add the show quota details extension API to the network quotas_client
+ library. This makes it possible to show a quota set for a specified
+ project, including the used, limit and reserved counts per
+ resource.
diff --git a/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml b/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml
new file mode 100644
index 0000000..e23abe3
--- /dev/null
+++ b/releasenotes/notes/add-support-args-kwargs-in-call-until-true-a91k592h5a64exf7.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add support for passing args and kwargs to func when calling it in
+ call_until_true, and log the elapsed time when call_until_true returns
+ True or False, for debugging.
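
A minimal sketch of the new calling convention; the predicate below is purely illustrative::

    from tempest.lib.common.utils import test_utils


    def file_has_content(path, minimum_bytes=1):
        # Illustrative predicate: True once the file holds enough bytes.
        try:
            with open(path) as f:
                return len(f.read()) >= minimum_bytes
        except IOError:
            return False

    # The predicate is retried until it returns True or 10 seconds elapse,
    # sleeping 1 second between attempts. The extra positional and keyword
    # arguments are now forwarded to the predicate, and the elapsed time is
    # logged when the loop finishes.
    test_utils.call_until_true(file_has_content, 10, 1,
                               '/tmp/example.txt', minimum_bytes=4)
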
diff --git a/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
new file mode 100644
index 0000000..14458d6
--- /dev/null
+++ b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add update group types API to v3 ``group_types_client`` library;
+ min_microversion of this API is 3.11.
diff --git a/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml b/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml
new file mode 100644
index 0000000..94ab462
--- /dev/null
+++ b/releasenotes/notes/add_proxy_url_get_credentials-aef66b085450513f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the proxy_url optional parameter to the get_credentials method in
+ tempest/lib/auth.py so that the helper can be used when going through
+ an HTTP proxy.
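
A hedged sketch of the new parameter; all endpoint, credential and proxy values are placeholders::

    from tempest.lib import auth

    creds = auth.get_credentials(
        'https://keystone.example.com/identity/v3',
        fill_in=True,                 # authenticate immediately
        identity_version='v3',
        username='demo', password='secretpassword',
        project_name='demo',
        user_domain_name='Default', project_domain_name='Default',
        # New optional parameter: route the token request through a proxy.
        proxy_url='http://proxy.example.com:3128')
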
diff --git a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
new file mode 100644
index 0000000..e3443c8
--- /dev/null
+++ b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - |
+ The CLIClient class, when it calls a command line client, uses
+ --os-project-name instead of --os-tenant-name for the project, and
+ passes --os-identity-api-version (default empty).
+ All CLI clients still available in supported releases of OpenStack
+ which are wrapped by the cmd_with_auth() method support those
+ switches.
diff --git a/releasenotes/notes/config-volume-multiattach-ea8138dfa4fd308c.yaml b/releasenotes/notes/config-volume-multiattach-ea8138dfa4fd308c.yaml
new file mode 100644
index 0000000..8d53dda
--- /dev/null
+++ b/releasenotes/notes/config-volume-multiattach-ea8138dfa4fd308c.yaml
@@ -0,0 +1,12 @@
+---
+other:
+ - |
+ A new configuration option ``[compute-feature-enabled]/volume_multiattach``
+ has been added which defaults to False. Set this to True to enable volume
+ multiattach testing. These tests require that compute API version 2.60 is
+ available and block storage API version 3.44 is available.
+
+ .. note:: In the Queens release, the only compute driver that supports
+ volume multiattach is the libvirt driver, and only then when qemu<2.10
+ or libvirt>=3.10. The only volume backend in Queens that supports volume
+ multiattach is lvm.
diff --git a/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml b/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml
new file mode 100644
index 0000000..e5d4ab7
--- /dev/null
+++ b/releasenotes/notes/disable-identity-v2-testing-4ef1565d1a5aedcf.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ As of the Queens release, tempest no longer tests the identity v2.0 API
+ because the majority of the v2.0 API has been removed from the identity
+ project. Once the Queens release reaches end-of-life, we can remove the
+ v2.0 tempest tests and clean up v2.0 testing cruft.
diff --git a/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml b/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml
new file mode 100644
index 0000000..c9a49a7
--- /dev/null
+++ b/releasenotes/notes/drop-DEFAULT_PARAMS-bfcc2e7b74ef880b.yaml
@@ -0,0 +1,13 @@
+---
+upgrade:
+ - |
+ Replace any call in your code to credentials_factory.DEFAULT_PARAMS with
+ a call to config.service_client_config().
+fixes:
+ - |
+ The credentials_factory module used to load configuration at import time,
+ which caused configuration to be loaded at test discovery time.
+ This was fixed by removing the DEFAULT_PARAMS variable. This variable
+ was redundant (and outdated); the same dictionary (but up to date) can
+ be obtained by invoking config.service_client_config() with no service
+ parameter.
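
The replacement is a one-liner; a minimal sketch::

    from tempest import config

    # Equivalent of the removed credentials_factory.DEFAULT_PARAMS: an
    # up-to-date dictionary of generic service client parameters, built
    # lazily so nothing is loaded at import / test discovery time.
    params = config.service_client_config()
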
diff --git a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
index 775a383..a002fb8 100644
--- a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
+++ b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
@@ -1,6 +1,6 @@
---
fixes:
- |
- Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
+ Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
The url path for list group snapshots with details API is changed from
``?detail=True`` to ``/detail``.
diff --git a/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml b/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml
new file mode 100644
index 0000000..56969de
--- /dev/null
+++ b/releasenotes/notes/http_proxy_config-cb39b55520e84db5.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - Adds a new config option, ``proxy_url``. This option is used to configure
+ running tempest through a proxy server.
+ - The RestClient class in tempest.lib.rest_client has a new kwarg parameter,
+ ``proxy_url``, that is used to set a proxy server.
+ - A new class was added to tempest.lib.http, ClosingProxyHttp. This behaves
+ identically to ClosingHttp except that it requires a proxy url and will
+ establish a connection through a proxy.
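
A hedged sketch; the import path is assumed to be ``tempest.lib.common.http`` and the URLs are placeholders::

    from tempest.lib.common import http

    # ClosingProxyHttp behaves like ClosingHttp but tunnels every request
    # through the configured proxy.
    client = http.ClosingProxyHttp('http://proxy.example.com:3128')
    response, body = client.request('https://docs.openstack.org', 'GET')
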
diff --git a/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml b/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml
new file mode 100644
index 0000000..1493b0b
--- /dev/null
+++ b/releasenotes/notes/intermediate-queens-release-2f9f305775fca454.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+ This is an intermediate release during the Queens development cycle to
+ make new functionality available to plugins and other consumers.
diff --git a/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml b/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml
new file mode 100644
index 0000000..0f104cf
--- /dev/null
+++ b/releasenotes/notes/list-auth-domains-v3-endpoint-9ec60c7d3011c397.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add the ``list_auth_domains`` API endpoint to the identity v3 client. This
+ makes it possible to list all domains a user has access to
+ via role assignments.
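
A small sketch of the new call; the response key is an assumption mirroring keystone's ``GET /v3/auth/domains``::

    def accessible_domains(identity_v3_client):
        # List all domains the authenticated user can access via role
        # assignments; the body is assumed to carry a 'domains' list.
        body = identity_v3_client.list_auth_domains()
        return body['domains']
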
diff --git a/releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
similarity index 84%
rename from releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml
rename to releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
index 9d5a1f5..2bba952 100644
--- a/releasenotes/notes/make-account-client-as-stable-interface-d1b07c7e8f17bef6.yaml
+++ b/releasenotes/notes/make-object-storage-client-as-stable-interface-d1b07c7e8f17bef6.yaml
@@ -7,3 +7,5 @@
without any maintenance changes.
* account_client
+ * container_client
+ * object_client
diff --git a/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
new file mode 100644
index 0000000..e15d387
--- /dev/null
+++ b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ The config options and feature flags below were deprecated for removal.
+ It is now time to remove them, as all supported stable branches can
+ handle their removal.
+
+ * ``[identity-feature-enabled].forbid_global_implied_dsr``
+ * ``[image-feature-enabled].deactivate_image``
+ * ``[default].resources_prefix``
+ * config group ``orchestration``
+ * ``[service_available].heat``
diff --git a/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml b/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml
new file mode 100644
index 0000000..c75da2e
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-apis-from-v2-volumes-client-3ca4a5db5fea518f.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+ Remove deprecated APIs from volume v2 volumes_client; the deprecated
+ APIs are now implemented in volume v2 transfers_client.
+
+ * create_volume_transfer
+ * show_volume_transfer
+ * list_volume_transfers
+ * delete_volume_transfer
+ * accept_volume_transfer
diff --git a/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
new file mode 100644
index 0000000..621731d
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Remove the deprecated decorator ``skip_unless_attr`` in lib/decorators.py.
diff --git a/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml b/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml
new file mode 100644
index 0000000..12ac5b5
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-volume-apis-from-v2-volumes-client-cf35e5b4cca89860.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove deprecated APIs (``show_pools`` and ``show_backend_capabilities``)
+ from volume v2 volumes_client; the deprecated APIs are now implemented in
+ volume v2 scheduler_stats_client (``list_pools``) and capabilities_client
+ (``show_backend_capabilities``) respectively.
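
A hedged migration sketch; argument shapes are assumptions to be checked against the volume v2 clients::

    def pools_and_capabilities(scheduler_stats_client, capabilities_client, host):
        # Formerly volumes_client.show_pools(); now:
        pools = scheduler_stats_client.list_pools(detail=True)
        # Formerly volumes_client.show_backend_capabilities(host); now:
        caps = capabilities_client.show_backend_capabilities(host)
        return pools, caps
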
diff --git a/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml b/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml
new file mode 100644
index 0000000..609000c
--- /dev/null
+++ b/releasenotes/notes/remove-get-ipv6-addr-by-EUI64-c79972d799c7a430.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Remove deprecated get_ipv6_addr_by_EUI64 method from data_utils.
+ Use the same method from oslo_utils.netutils.
diff --git a/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
new file mode 100644
index 0000000..17866e5
--- /dev/null
+++ b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The tox ostestr job (normally invoked with ``tox -eostestr``) has been
+ removed. This was lightly used, and in the near future ostestr will be
+ removed from the tempest requirements file. If you were relying on this
+ functionality you can replicate it by using the venv-tempest tox job. For
+ example, simply running ``tox -evenv-tempest -- ostestr`` will do the same
+ thing the old ostestr job did.
diff --git a/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml b/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml
new file mode 100644
index 0000000..e27ee33
--- /dev/null
+++ b/releasenotes/notes/test-clients-stable-for-plugin-90b1e7dc83f28ccd.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Two extra modules are now marked as stable for plugins: test.py and
+ clients.py. The former provides the test base class with its automatic
+ credential provisioning and test resource management fixtures. The latter
+ is built on top of ServiceClients and adds aliases and a few custom
+ configurations to it.
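As a rough illustration of what this stability promise enables, a plugin can build its test classes directly on ``tempest.test.BaseTestCase`` and rely on the credential and client plumbing it provides. The class and client names in the sketch below are illustrative, not taken from this change.

.. code-block:: python

    # Hypothetical plugin test built on the now-stable test module.
    from tempest import test


    class MyPluginTest(test.BaseTestCase):

        credentials = ['primary']  # automatic credential provisioning

        @classmethod
        def setup_clients(cls):
            super(MyPluginTest, cls).setup_clients()
            # cls.os_primary is a clients.Manager for the provisioned
            # primary credentials; pick the service clients you need.
            cls.servers_client = cls.os_primary.servers_client

        def test_clients_are_wired(self):
            self.assertIsNotNone(self.servers_client)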
diff --git a/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
new file mode 100644
index 0000000..ddd1704
--- /dev/null
+++ b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ A new boolean configuration option
+ ``[compute-feature-enabled]/volume_backed_live_migration`` has been added.
+ If enabled, tests which validate the behavior of Nova's *volume-backed live
+ migration* feature will be executed. The option defaults to ``False``.
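The gating pattern that consumes this option appears later in this change (test_live_migration.py); it is shown here in isolation for clarity, with ``CONF`` assumed to be ``tempest.config.CONF``.

.. code-block:: python

    # Tests guard on the new flag and skip when the deployment does not
    # support volume-backed live migration.
    @testtools.skipUnless(
        CONF.compute_feature_enabled.volume_backed_live_migration,
        'Volume-backed live migration not available')
    def test_volume_backed_live_migration(self):
        ...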
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index ae3dca1..57ec7e1 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -65,16 +65,12 @@
project = u'tempest Release Notes'
copyright = u'2016, tempest Developers'
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-from tempest.version import version_info as tempest_version
+# Release notes do not need a version number in the title; they
+# cover multiple versions.
# The full version, including alpha/beta/rc tags.
-release = tempest_version.version_string_with_vcs()
+release = ''
# The short X.Y version.
-version = tempest_version.canonical_version_string()
+version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/requirements.txt b/requirements.txt
index 911f0e5..c02cd05 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,21 +2,21 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-cliff>=2.8.0 # Apache-2.0
+cliff!=2.9.0,>=2.8.0 # Apache-2.0
jsonschema<3.0.0,>=2.6.0 # MIT
-testtools>=1.4.0 # MIT
+testtools>=2.2.0 # MIT
paramiko>=2.0.0 # LGPLv2.1+
netaddr>=0.7.18 # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
-oslo.concurrency>=3.20.0 # Apache-2.0
-oslo.config>=4.6.0 # Apache-2.0
-oslo.log>=3.30.0 # Apache-2.0
+oslo.concurrency>=3.25.0 # Apache-2.0
+oslo.config>=5.1.0 # Apache-2.0
+oslo.log>=3.36.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.utils>=3.28.0 # Apache-2.0
-six>=1.9.0 # MIT
+oslo.utils>=3.33.0 # Apache-2.0
+six>=1.10.0 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
PyYAML>=3.10 # MIT
-python-subunit>=0.0.18 # Apache-2.0/BSD
+python-subunit>=1.0.0 # Apache-2.0/BSD
stevedore>=1.20.0 # Apache-2.0
PrettyTable<0.8,>=0.7.1 # BSD
os-testr>=1.0.0 # Apache-2.0
diff --git a/roles/acl-devstack-files/README.rst b/roles/acl-devstack-files/README.rst
new file mode 100644
index 0000000..76e7e58
--- /dev/null
+++ b/roles/acl-devstack-files/README.rst
@@ -0,0 +1,10 @@
+Grant global read access to the devstack `files` folder.
+
+This is handy for granting the `tempest` user access to VM images for testing.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_data_dir
+ :default: /opt/stack/data
+
+ The devstack data directory.
diff --git a/roles/acl-devstack-files/defaults/main.yaml b/roles/acl-devstack-files/defaults/main.yaml
new file mode 100644
index 0000000..14265f0
--- /dev/null
+++ b/roles/acl-devstack-files/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_data_dir: /opt/stack/data
diff --git a/roles/acl-devstack-files/tasks/main.yaml b/roles/acl-devstack-files/tasks/main.yaml
new file mode 100644
index 0000000..b3eeec7
--- /dev/null
+++ b/roles/acl-devstack-files/tasks/main.yaml
@@ -0,0 +1,6 @@
+- name: Grant global read access to devstack files
+ file:
+ path: "{{devstack_data_dir}}/files"
+ mode: "o+rx"
+ recurse: yes
+ become: yes
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
new file mode 100644
index 0000000..54c217b
--- /dev/null
+++ b/roles/process-stackviz/README.rst
@@ -0,0 +1,22 @@
+Generate a stackviz report.
+
+Generate a stackviz report from subunit and dstat data, using
+the stackviz archive embedded in test images.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: stage_dir
+ :default: "{{ ansible_user_dir }}"
+
+ The stage directory where the input data can be found and
+ the output will be produced.
+
+.. zuul:rolevar:: test_results_stage_name
+ :default: test_results
+
+ The name of the subunit file to be used as input.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
new file mode 100644
index 0000000..c6a64d1
--- /dev/null
+++ b/roles/process-stackviz/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
+test_results_stage_name: test_results
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
new file mode 100644
index 0000000..09de606
--- /dev/null
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -0,0 +1,63 @@
+- name: Check if stackviz archive exists
+ stat:
+ path: "/opt/cache/files/stackviz-latest.tar.gz"
+ register: stackviz_archive
+
+- debug:
+ msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
+ when: not stackviz_archive.stat.exists
+
+- name: Check if subunit data exists
+ stat:
+ path: "{{ stage_dir }}/{{ test_results_stage_name }}.subunit"
+ register: subunit_input
+
+- debug:
+ msg: "Subunit file could not be found at {{ stage_dir }}/{{ test_results_stage_name }}.subunit"
+ when: not subunit_input.stat.exists
+
+- name: Install stackviz
+ pip:
+ name: "file://{{ stackviz_archive.stat.path }}"
+ virtualenv: /tmp/stackviz
+ extra_args: -U
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Deploy stackviz static html+js
+ command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Check if dstat data exists
+ stat:
+ path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
+ register: dstat_input
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+- name: Run stackviz with dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - dstat_input.stat.exists
+
+- name: Run stackviz without dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - not dstat_input.stat.exists
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
new file mode 100644
index 0000000..33dcce9
--- /dev/null
+++ b/roles/run-tempest/README.rst
@@ -0,0 +1,35 @@
+Run Tempest
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+ :default: 0
+
+ The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+ :default: ''
+
+ A regular expression used to select the tests.
+
+ It works only when used with some specific tox environments
+ ('all', 'all-plugin').
+
+ Multi-line and commented regexes can be achieved by doing this:
+
+ ::
+ vars:
+ tempest_test_regex: |
+ (?x) # Ignore comments and whitespaces
+ # Line with only a comment.
+ (tempest\.(api|scenario|thirdparty)).*$ # Run only api scenario and third party
+
+.. zuul:rolevar:: tox_envlist
+ :default: smoke
+
+ The Tempest tox environment to run.
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
new file mode 100644
index 0000000..85e94f2
--- /dev/null
+++ b/roles/run-tempest/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_envlist: smoke
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
new file mode 100644
index 0000000..87898db
--- /dev/null
+++ b/roles/run-tempest/tasks/main.yaml
@@ -0,0 +1,28 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to the value reported by sysctl.
+- name: Get hw.logicalcpu from sysctl
+ shell: sysctl hw.logicalcpu | cut -d' ' -f2
+ register: sysctl_hw_logicalcpu
+ when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+ set_fact:
+ num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+ set_fact:
+ default_concurrency: "{{ num_cores }}"
+ when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+ set_fact:
+ default_concurrency: "{{ num_cores|int // 2 }}"
+ when: num_cores|int > 3
+
+- name: Run Tempest
+ command: tox -e {{tox_envlist}} -- {{tempest_test_regex|quote}} --concurrency={{tempest_concurrency|default(default_concurrency)}}
+ args:
+ chdir: "{{devstack_base_dir}}/tempest"
+ become: true
+ become_user: tempest
diff --git a/roles/setup-tempest-data-dir/README.rst b/roles/setup-tempest-data-dir/README.rst
new file mode 100644
index 0000000..db0b083
--- /dev/null
+++ b/roles/setup-tempest-data-dir/README.rst
@@ -0,0 +1,12 @@
+Set up the `tempest` user as the owner of Tempest's data folder.
+
+Tempest's devstack plugin creates the data folder, but it has no knowledge
+of the `tempest` user, so we need a role to fix ownership on the data folder.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_data_dir
+ :default: /opt/stack/data
+
+ The devstack data directory.
diff --git a/roles/setup-tempest-data-dir/defaults/main.yaml b/roles/setup-tempest-data-dir/defaults/main.yaml
new file mode 100644
index 0000000..14265f0
--- /dev/null
+++ b/roles/setup-tempest-data-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_data_dir: /opt/stack/data
diff --git a/roles/setup-tempest-data-dir/tasks/main.yaml b/roles/setup-tempest-data-dir/tasks/main.yaml
new file mode 100644
index 0000000..9dd6309
--- /dev/null
+++ b/roles/setup-tempest-data-dir/tasks/main.yaml
@@ -0,0 +1,7 @@
+- name: Set tempest as owner of Tempest data folder
+ file:
+ path: "{{devstack_data_dir}}/tempest"
+ owner: tempest
+ group: stack
+ recurse: yes
+ become: yes
diff --git a/roles/setup-tempest-run-dir/README.rst b/roles/setup-tempest-run-dir/README.rst
new file mode 100644
index 0000000..c8e2339
--- /dev/null
+++ b/roles/setup-tempest-run-dir/README.rst
@@ -0,0 +1,14 @@
+Set up the Tempest run folder.
+
+To support isolation between multiple runs, separate run folders are required.
+Set `tempest` as the owner of Tempest's current run folder.
+There is an implicit assumption here of a one-to-one relationship between
+devstack versions and Tempest runs.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/setup-tempest-run-dir/defaults/main.yaml b/roles/setup-tempest-run-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-tempest-run-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-tempest-run-dir/tasks/main.yaml b/roles/setup-tempest-run-dir/tasks/main.yaml
new file mode 100644
index 0000000..a012d72
--- /dev/null
+++ b/roles/setup-tempest-run-dir/tasks/main.yaml
@@ -0,0 +1,7 @@
+- name: Set tempest as owner of Tempest run folder
+ file:
+ path: "{{devstack_base_dir}}/tempest"
+ owner: tempest
+ group: stack
+ recurse: yes
+ become: yes
diff --git a/tempest/README.rst b/tempest/README.rst
index 663653e..62821de 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -12,10 +12,12 @@
 and guidelines. Below is the overview of the Tempest repository structure
to make this clear.
-| tempest/
-| api/ - API tests
-| scenario/ - complex scenario tests
-| tests/ - unit tests for Tempest internals
+ .. code-block:: console
+
+ tempest/
+ api/ - API tests
+ scenario/ - complex scenario tests
+ tests/ - unit tests for Tempest internals
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index 41be620..36ff09e 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -27,7 +27,6 @@
def setup_clients(cls):
super(AggregatesAdminNegativeTestJSON, cls).setup_clients()
cls.client = cls.os_admin.aggregates_client
- cls.user_client = cls.aggregates_client
@classmethod
def resource_setup(cls):
@@ -52,7 +51,7 @@
# Regular user is not allowed to create an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
self.assertRaises(lib_exc.Forbidden,
- self.user_client.create_aggregate,
+ self.aggregates_client.create_aggregate,
name=aggregate_name)
@decorators.attr(type=['negative'])
@@ -87,7 +86,7 @@
# Regular user is not allowed to delete an aggregate.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.delete_aggregate,
+ self.aggregates_client.delete_aggregate,
aggregate['id'])
@decorators.attr(type=['negative'])
@@ -95,7 +94,7 @@
def test_aggregate_list_as_user(self):
# Regular user is not allowed to list aggregates.
self.assertRaises(lib_exc.Forbidden,
- self.user_client.list_aggregates)
+ self.aggregates_client.list_aggregates)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('557cad12-34c9-4ff4-95f0-22f0dfbaf7dc')
@@ -103,7 +102,7 @@
# Regular user is not allowed to get aggregate details.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.show_aggregate,
+ self.aggregates_client.show_aggregate,
aggregate['id'])
@decorators.attr(type=['negative'])
@@ -140,7 +139,7 @@
# Regular user is not allowed to add a host to an aggregate.
aggregate = self._create_test_aggregate()
self.assertRaises(lib_exc.Forbidden,
- self.user_client.add_host,
+ self.aggregates_client.add_host,
aggregate['id'], host=self.host)
@decorators.attr(type=['negative'])
@@ -168,7 +167,7 @@
host=self.host)
self.assertRaises(lib_exc.Forbidden,
- self.user_client.remove_host,
+ self.aggregates_client.remove_host,
aggregate['id'], host=self.host)
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index a9772c4..c4d5768 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -84,8 +84,7 @@
nets = cls.networks_client.list_networks(
**search_opts).get('networks', [])
if nets:
- raise lib_excs.TempestException(
- 'Found shared networks: %s' % nets)
+ raise cls.skipException('Found shared networks: %s' % nets)
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 08b2d19..711b441 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -56,6 +56,18 @@
# Create a flavor with ephemeral disk
flavor = self.create_flavor(name=flavor_name, ram=ram, vcpus=vcpus,
disk=disk, ephemeral=ephem_disk)
+
+ # Copy the extra specs from self.flavor_ref to the created flavor,
+ # because the environment may need special extra specs (already set
+ # on self.flavor_ref) in order to create a server.
+ extra_spec_keys = \
+ self.admin_flavors_client.list_flavor_extra_specs(
+ self.flavor_ref)['extra_specs']
+ if extra_spec_keys:
+ self.admin_flavors_client.set_flavor_extra_spec(
+ flavor['id'], **extra_spec_keys)
+
return flavor['id']
flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index ebba73c..66c2c2d 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -42,6 +42,7 @@
super(FixedIPsTestJson, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.show_server(server['id'])['server']
+ cls.ip = None
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
@@ -49,6 +50,9 @@
break
if cls.ip:
break
+ if cls.ip is None:
+ raise cls.skipException("No fixed ip found for server: %s"
+ % server['id'])
@decorators.idempotent_id('16b7d848-2f7c-4709-85a3-2dfb4576cc52')
def test_list_fixed_ip_details(self):
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index a5deb3c..7d41f46 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -43,6 +43,7 @@
super(FixedIPsNegativeTestJson, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.show_server(server['id'])['server']
+ cls.ip = None
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
@@ -50,6 +51,9 @@
break
if cls.ip:
break
+ if cls.ip is None:
+ raise cls.skipException("No fixed ip found for server: %s"
+ % server['id'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f17f47d-daad-4adc-986e-12370c93e407')
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 404fd94..b23c59f 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -17,12 +17,12 @@
from tempest.lib import decorators
-class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
+class HypervisorAdminTestBase(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
- super(HypervisorAdminTestJSON, cls).setup_clients()
+ super(HypervisorAdminTestBase, cls).setup_clients()
cls.client = cls.os_admin.hypervisor_client
def _list_hypervisors(self):
@@ -30,6 +30,10 @@
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
+
+class HypervisorAdminTestJSON(HypervisorAdminTestBase):
+ """Tests Hypervisors API that require admin privileges"""
+
@decorators.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
@@ -53,17 +57,6 @@
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
- @decorators.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
- def test_get_hypervisor_show_servers(self):
- # Show instances about the specific hypervisors
- hypers = self._list_hypervisors()
- self.assertNotEmpty(hypers, "No hypervisors found.")
-
- hostname = hypers[0]['hypervisor_hostname']
- hypervisors = (self.client.list_servers_on_hypervisor(hostname)
- ['hypervisors'])
- self.assertNotEmpty(hypervisors)
-
@decorators.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of the all hypervisor
@@ -110,6 +103,21 @@
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
+
+class HypervisorAdminUnderV252Test(HypervisorAdminTestBase):
+ max_microversion = '2.52'
+
+ @decorators.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
+ def test_get_hypervisor_show_servers(self):
+ # Show instances about the specific hypervisors
+ hypers = self._list_hypervisors()
+ self.assertNotEmpty(hypers, "No hypervisors found.")
+
+ hostname = hypers[0]['hypervisor_hostname']
+ hypervisors = (self.client.list_servers_on_hypervisor(hostname)
+ ['hypervisors'])
+ self.assertNotEmpty(hypervisors)
+
@decorators.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index 431e823..0056376 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -19,12 +19,12 @@
from tempest.lib import exceptions as lib_exc
-class HypervisorAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
+class HypervisorAdminNegativeTestBase(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
- super(HypervisorAdminNegativeTestJSON, cls).setup_clients()
+ super(HypervisorAdminNegativeTestBase, cls).setup_clients()
cls.client = cls.os_admin.hypervisor_client
cls.non_adm_client = cls.hypervisor_client
@@ -33,6 +33,10 @@
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
+
+class HypervisorAdminNegativeTestJSON(HypervisorAdminNegativeTestBase):
+ """Tests Hypervisors API that require admin privileges"""
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c136086a-0f67-4b2b-bc61-8482bd68989f')
def test_show_nonexistent_hypervisor(self):
@@ -55,27 +59,6 @@
hypers[0]['id'])
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('2a0a3938-832e-4859-95bf-1c57c236b924')
- def test_show_servers_with_non_admin_user(self):
- hypers = self._list_hypervisors()
- self.assertNotEmpty(hypers)
-
- self.assertRaises(
- lib_exc.Forbidden,
- self.non_adm_client.list_servers_on_hypervisor,
- hypers[0]['id'])
-
- @decorators.attr(type=['negative'])
- @decorators.idempotent_id('02463d69-0ace-4d33-a4a8-93d7883a2bba')
- def test_show_servers_with_nonexistent_hypervisor(self):
- nonexistent_hypervisor_id = data_utils.rand_uuid()
-
- self.assertRaises(
- lib_exc.NotFound,
- self.client.list_servers_on_hypervisor,
- nonexistent_hypervisor_id)
-
- @decorators.attr(type=['negative'])
@decorators.idempotent_id('e2b061bb-13f9-40d8-9d6e-d5bf17595849')
def test_get_hypervisor_stats_with_non_admin_user(self):
self.assertRaises(
@@ -119,13 +102,30 @@
lib_exc.Forbidden,
self.non_adm_client.list_hypervisors, detail=True)
+
+class HypervisorAdminNegativeUnderV252Test(HypervisorAdminNegativeTestBase):
+ max_microversion = '2.52'
+
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('19a45cc1-1000-4055-b6d2-28e8b2ec4faa')
- def test_search_nonexistent_hypervisor(self):
+ @decorators.idempotent_id('2a0a3938-832e-4859-95bf-1c57c236b924')
+ def test_show_servers_with_non_admin_user(self):
+ hypers = self._list_hypervisors()
+ self.assertNotEmpty(hypers)
+
+ self.assertRaises(
+ lib_exc.Forbidden,
+ self.non_adm_client.list_servers_on_hypervisor,
+ hypers[0]['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('02463d69-0ace-4d33-a4a8-93d7883a2bba')
+ def test_show_servers_with_nonexistent_hypervisor(self):
+ nonexistent_hypervisor_id = data_utils.rand_uuid()
+
self.assertRaises(
lib_exc.NotFound,
- self.client.search_hypervisor,
- 'nonexistent_hypervisor_name')
+ self.client.list_servers_on_hypervisor,
+ nonexistent_hypervisor_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5b6a6c79-5dc1-4fa5-9c58-9c8085948e74')
@@ -137,3 +137,11 @@
lib_exc.Forbidden,
self.non_adm_client.search_hypervisor,
hypers[0]['hypervisor_hostname'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('19a45cc1-1000-4055-b6d2-28e8b2ec4faa')
+ def test_search_nonexistent_hypervisor(self):
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.client.search_hypervisor,
+ 'nonexistent_hypervisor_name')
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index e24c7c1..24ea8a1 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -34,7 +34,8 @@
k_name = data_utils.rand_name('keypair')
keypair = self.create_keypair(k_name,
keypair_type='ssh',
- user_id=user_id)
+ user_id=user_id,
+ client=self.client)
self.assertEqual(k_name, keypair['name'],
"The created keypair name is not equal "
"to the requested name!")
@@ -56,7 +57,8 @@
self.assertEqual(user_id, keypair_detail['user_id'],
"The fetched keypair is not for requested user!")
 # Create an admin keypair
- admin_keypair = self.create_keypair(keypair_type='ssh')
+ admin_keypair = self.create_keypair(keypair_type='ssh',
+ client=self.client)
admin_keypair.pop('private_key', None)
admin_keypair.pop('user_id')
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 14be947..2398cf1 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -46,6 +46,18 @@
"Less than 2 compute nodes, skipping migration test.")
@classmethod
+ def setup_credentials(cls):
+ # These tests don't attempt any SSH validation nor do they use
+ # floating IPs on the instance, so all we need is a network and
+ # a subnet so the instance being migrated has a single port, but
+ # we need that to make sure we are properly updating the port
+ # host bindings during the live migration.
+ # TODO(mriedem): SSH validation before and after the instance is
+ # live migrated would be a nice test wrinkle addition.
+ cls.set_network_resources(network=True, subnet=True)
+ super(LiveMigrationTest, cls).setup_credentials()
+
+ @classmethod
def setup_clients(cls):
super(LiveMigrationTest, cls).setup_clients()
cls.admin_migration_client = cls.os_admin.migrations_client
@@ -120,7 +132,9 @@
def test_live_block_migration_paused(self):
self._test_live_migration(state='PAUSED')
- @decorators.skip_because(bug="1524898")
+ @testtools.skipUnless(CONF.compute_feature_enabled.
+ volume_backed_live_migration,
+ 'Volume-backed live migration not available')
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@utils.services('volume')
def test_volume_backed_live_migration(self):
@@ -144,14 +158,11 @@
self.attach_volume(server, volume, device='/dev/xvdb')
server = self.admin_servers_client.show_server(server_id)['server']
volume_id1 = server["os-extended-volumes:volumes_attached"][0]["id"]
- self._migrate_server_to(server_id, target_host)
- waiters.wait_for_server_status(self.servers_client,
- server_id, 'ACTIVE')
+ self._live_migrate(server_id, target_host, 'ACTIVE')
server = self.admin_servers_client.show_server(server_id)['server']
volume_id2 = server["os-extended-volumes:volumes_attached"][0]["id"]
- self.assertEqual(target_host, self.get_host_for_server(server_id))
self.assertEqual(volume_id1, volume_id2)
@@ -190,10 +201,7 @@
self._verify_console_interaction(server01_id)
self._verify_console_interaction(server02_id)
- self._migrate_server_to(server01_id, host02_id)
- waiters.wait_for_server_status(self.servers_client,
- server01_id, 'ACTIVE')
- self.assertEqual(host02_id, self.get_host_for_server(server01_id))
+ self._live_migrate(server01_id, host02_id, 'ACTIVE')
self._verify_console_interaction(server01_id)
# At this point, both instances have a valid serial console
# connection, which means the ports got updated.
@@ -216,8 +224,8 @@
while data not in console_output and t <= 120.0:
try:
ws.send_frame(data)
- recieved = ws.receive_frame()
- console_output += recieved
+ received = ws.receive_frame()
+ console_output += received
except Exception:
# In case we had an issue with send/receive on the
# websocket connection, we create a new one.
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index a626ebb..a6b71b2 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -77,6 +77,16 @@
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
+ # Copy the extra specs from self.flavor_ref to the created flavor,
+ # because the environment may need special extra specs (already set
+ # on self.flavor_ref) in order to create a server.
+ extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
+ self.flavor_ref)['extra_specs']
+ if extra_spec_keys:
+ self.admin_flavors_client.set_flavor_extra_spec(
+ flavor['id'], **extra_spec_keys)
+
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index acb0d90..87ce39d 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -24,7 +24,7 @@
"""Tests Nova Networks API that usually requires admin privileges.
API docs:
- http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
+ https://developer.openstack.org/api-ref/compute/#networks-os-networks-deprecated
"""
@classmethod
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 2e7b07b..18c974a 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -111,3 +111,37 @@
hostnames = list(hosts.values())
self.assertNotEqual(hostnames[0], hostnames[1],
'Servers are on the same host: %s' % hosts)
+
+ @decorators.idempotent_id('9d2e924a-baf4-11e7-b856-fa163e65f5ce')
+ @testtools.skipUnless(
+ compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
+ 'ServerGroupAffinityFilter is not available.')
+ def test_create_server_with_scheduler_hint_group_affinity(self):
+ """Tests the ServerGroupAffinityFilter
+
+ Creates two servers in an affinity server group and
+ asserts the servers are in the group and on same host.
+ """
+ group_id = self.create_test_server_group(policy=['affinity'])['id']
+ hints = {'group': group_id}
+ reservation_id = self.create_test_server(
+ scheduler_hints=hints, wait_until='ACTIVE', min_count=2,
+ return_reservation_id=True)['reservation_id']
+
+ # Get the servers using the reservation_id.
+ servers = self.servers_client.list_servers(
+ detail=True, reservation_id=reservation_id)['servers']
+ self.assertEqual(2, len(servers))
+
+ # Assert the servers are in the group.
+ server_group = self.server_groups_client.show_server_group(
+ group_id)['server_group']
+ hosts = {}
+ for server in servers:
+ self.assertIn(server['id'], server_group['members'])
+ hosts[server['id']] = self._get_host(server['id'])
+
+ # Assert the servers are on same host.
+ hostnames = list(hosts.values())
+ self.assertEqual(hostnames[0], hostnames[1],
+ 'Servers are on the different hosts: %s' % hosts)
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index d715a42..99bad8f 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -22,30 +22,16 @@
CONF = config.CONF
-class TestVolumeSwap(base.BaseV2ComputeAdminTest):
- """The test suite for swapping of volume with admin user.
-
- The following is the scenario outline:
- 1. Create a volume "volume1" with non-admin.
- 2. Create a volume "volume2" with non-admin.
- 3. Boot an instance "instance1" with non-admin.
- 4. Attach "volume1" to "instance1" with non-admin.
- 5. Swap volume from "volume1" to "volume2" as admin.
- 6. Check the swap volume is successful and "volume2"
- is attached to "instance1" and "volume1" is in available state.
- 7. Swap volume from "volume2" to "volume1" as admin.
- 8. Check the swap volume is successful and "volume1"
- is attached to "instance1" and "volume2" is in available state.
- """
+class TestVolumeSwapBase(base.BaseV2ComputeAdminTest):
@classmethod
def skip_checks(cls):
- super(TestVolumeSwap, cls).skip_checks()
+ super(TestVolumeSwapBase, cls).skip_checks()
if not CONF.compute_feature_enabled.swap_volume:
raise cls.skipException("Swapping volumes is not supported.")
- def _wait_for_server_volume_swap(self, server_id, old_volume_id,
- new_volume_id):
+ def wait_for_server_volume_swap(self, server_id, old_volume_id,
+ new_volume_id):
"""Waits for a server to swap the old volume to a new one."""
volume_attachments = self.servers_client.list_volume_attachments(
server_id)['volumeAttachments']
@@ -79,6 +65,23 @@
'timeout': self.servers_client.build_timeout})
raise lib_exc.TimeoutException(message)
+
+class TestVolumeSwap(TestVolumeSwapBase):
+ """The test suite for swapping of volume with admin user.
+
+ The following is the scenario outline:
+ 1. Create a volume "volume1" with non-admin.
+ 2. Create a volume "volume2" with non-admin.
+ 3. Boot an instance "instance1" with non-admin.
+ 4. Attach "volume1" to "instance1" with non-admin.
+ 5. Swap volume from "volume1" to "volume2" as admin.
+ 6. Check the swap volume is successful and "volume2"
+ is attached to "instance1" and "volume1" is in available state.
+ 7. Swap volume from "volume2" to "volume1" as admin.
+ 8. Check the swap volume is successful and "volume1"
+ is attached to "instance1" and "volume2" is in available state.
+ """
+
@decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
@utils.services('volume')
def test_volume_swap(self):
@@ -99,8 +102,8 @@
volume1['id'], 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
volume2['id'], 'in-use')
- self._wait_for_server_volume_swap(server['id'], volume1['id'],
- volume2['id'])
+ self.wait_for_server_volume_swap(server['id'], volume1['id'],
+ volume2['id'])
# Verify "volume2" is attached to the server
vol_attachments = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
@@ -114,10 +117,64 @@
volume2['id'], 'available')
waiters.wait_for_volume_resource_status(self.volumes_client,
volume1['id'], 'in-use')
- self._wait_for_server_volume_swap(server['id'], volume2['id'],
- volume1['id'])
+ self.wait_for_server_volume_swap(server['id'], volume2['id'],
+ volume1['id'])
# Verify "volume1" is attached to the server
vol_attachments = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
self.assertEqual(1, len(vol_attachments))
self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
+
+
+class AttachVolumeMultiAttachTest(TestVolumeSwapBase):
+ min_microversion = '2.60'
+ max_microversion = 'latest'
+
+ @classmethod
+ def skip_checks(cls):
+ super(AttachVolumeMultiAttachTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.volume_multiattach:
+ raise cls.skipException('Volume multi-attach is not available.')
+
+ @decorators.idempotent_id('e8f8f9d1-d7b7-4cd2-8213-ab85ef697b6e')
+ @utils.services('volume')
+ def test_volume_swap_with_multiattach(self):
+ # Create two volumes.
+ # NOTE(gmann): Volumes are created before server creation so that
+ # volume cleanup can happen successfully irrespective of which volume
+ # is attached to the server.
+ volume1 = self.create_volume(multiattach=True)
+ volume2 = self.create_volume(multiattach=True)
+
+ # Boot server1
+ server1 = self.create_test_server(wait_until='ACTIVE')
+ # Attach volume1 to server1
+ self.attach_volume(server1, volume1)
+ # Boot server2
+ server2 = self.create_test_server(wait_until='ACTIVE')
+ # Attach volume1 to server2
+ self.attach_volume(server2, volume1)
+
+ # Swap volume1 to volume2 on server1; volume1 should remain attached
+ # to server2.
+ self.admin_servers_client.update_attached_volume(
+ server1['id'], volume1['id'], volumeId=volume2['id'])
+ # volume1 will return to in-use after the swap
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume1['id'], 'in-use')
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume2['id'], 'in-use')
+ self.wait_for_server_volume_swap(server1['id'], volume1['id'],
+ volume2['id'])
+
+ # Verify volume2 is attached to server1
+ vol_attachments = self.servers_client.list_volume_attachments(
+ server1['id'])['volumeAttachments']
+ self.assertEqual(1, len(vol_attachments))
+ self.assertIn(volume2['id'], vol_attachments[0]['volumeId'])
+
+ # Verify volume1 is still attached to server2
+ vol_attachments = self.servers_client.list_volume_attachments(
+ server2['id'])['volumeAttachments']
+ self.assertEqual(1, len(vol_attachments))
+ self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 5c4767c..9759be7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -99,6 +99,15 @@
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
+ if CONF.service_available.glance:
+ if CONF.image_feature_enabled.api_v1:
+ cls.images_client = cls.os_primary.image_client
+ elif CONF.image_feature_enabled.api_v2:
+ cls.images_client = cls.os_primary.image_client_v2
+ else:
+ raise lib_exc.InvalidConfiguration(
+ 'Either api_v1 or api_v2 must be True in '
+ '[image-feature-enabled].')
@classmethod
def resource_setup(cls):
@@ -176,11 +185,12 @@
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
+ tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
- if request_version >= v2_37_version and 'networks' not in kwargs:
+ if (request_version >= v2_37_version and 'networks' not in kwargs and
+ not tenant_network):
kwargs['networks'] = 'none'
- tenant_network = cls.get_tenant_network()
body, servers = compute.create_test_server(
cls.os_primary,
validatable,
@@ -254,7 +264,11 @@
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
- """Wrapper utility that returns an image created from the server."""
+ """Wrapper utility that returns an image created from the server.
+
+ If compute microversion >= 2.36, the returned image response will
+ be from the image service API rather than the compute image proxy API.
+ """
name = kwargs.pop('name',
data_utils.rand_name(cls.__name__ + "-image"))
wait_until = kwargs.pop('wait_until', None)
@@ -267,14 +281,21 @@
image_id = image['image_id']
else:
image_id = data_utils.parse_image_id(image.response['location'])
+
+ # The compute image proxy APIs were deprecated in 2.35 so
+ # use the images client directly if the API microversion being
+ # used is >=2.36.
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.36", image.response, "lt"):
+ client = cls.images_client
+ else:
+ client = cls.compute_images_client
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
- cls.compute_images_client.delete_image,
- image_id)
+ client.delete_image, image_id)
if wait_until is not None:
try:
- waiters.wait_for_image_status(cls.compute_images_client,
- image_id, wait_until)
+ waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
# If the image is not found after create_image returned
@@ -292,7 +313,11 @@
image_id=image_id)
else:
raise
- image = cls.compute_images_client.show_image(image_id)['image']
+ image = client.show_image(image_id)
+ # Compute image client returns response wrapped in 'image' element
+ # which is not the case with Glance image client.
+ if 'image' in image:
+ image = image['image']
if wait_until.upper() == 'ACTIVE':
if wait_for_server:
@@ -301,7 +326,7 @@
return image
@classmethod
- def rebuild_server(cls, server_id, validatable=False, **kwargs):
+ def recreate_server(cls, server_id, validatable=False, **kwargs):
"""Destroy an existing class level server and creates a new one
Some test classes use a test server that can be used by multiple
@@ -352,6 +377,13 @@
'VERIFY_RESIZE')
cls.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE')
+ server = cls.servers_client.show_server(server_id)['server']
+ # Nova API > 2.46 no longer includes flavor.id
+ if server['flavor'].get('id'):
+ if new_flavor_id != server['flavor']['id']:
+ msg = ('Flavor id of %s is not equal to new_flavor_id.'
+ % server_id)
+ raise lib_exc.TempestException(msg)
@classmethod
def delete_volume(cls, volume_id):
@@ -422,7 +454,24 @@
volume['id'], 'available')
return volume
- def attach_volume(self, server, volume, device=None, check_reserved=False):
+ def _detach_volume(self, server, volume):
+ """Helper method to detach a volume.
+
+ Ignores 404 responses if the volume or server do not exist, or the
+ volume is already detached from the server.
+ """
+ try:
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ # Check the status. You can only detach an in-use volume, otherwise
+ # the compute API will return a 400 response.
+ if volume['status'] == 'in-use':
+ self.servers_client.detach_volume(server['id'], volume['id'])
+ except lib_exc.NotFound:
+ # Ignore 404s on detach in case the server is deleted or the volume
+ # is already detached.
+ pass
+
+ def attach_volume(self, server, volume, device=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -431,15 +480,10 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
- :param check_reserved: Consider a status of reserved as valid for
- completion. This is to handle new Cinder attach where we more
- accurately use 'reserved' for things like attaching to a shelved
- server.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
-
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
@@ -449,14 +493,9 @@
self.volumes_client, volume['id'], 'available')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.servers_client.detach_volume,
- server['id'], volume['id'])
- statuses = ['in-use']
- if check_reserved:
- statuses.append('reserved')
+ self.addCleanup(self._detach_volume, server, volume)
waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], statuses)
+ volume['id'], 'in-use')
return attachment
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index efd4f0e..3a474e6 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -30,18 +30,6 @@
class FlavorsV2NegativeTest(base.BaseV2ComputeTest):
- @classmethod
- def setup_clients(cls):
- super(FlavorsV2NegativeTest, cls).setup_clients()
- if CONF.image_feature_enabled.api_v1:
- cls.images_client = cls.os_primary.image_client
- elif CONF.image_feature_enabled.api_v2:
- cls.images_client = cls.os_primary.image_client_v2
- else:
- raise lib_exc.InvalidConfiguration(
- 'Either api_v1 or api_v2 must be True in '
- '[image-feature-enabled].')
-
@decorators.attr(type=['negative'])
@utils.services('image')
@decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
diff --git a/tempest/api/compute/floating_ips/base.py b/tempest/api/compute/floating_ips/base.py
index 142eaec..262a3c1 100644
--- a/tempest/api/compute/floating_ips/base.py
+++ b/tempest/api/compute/floating_ips/base.py
@@ -14,6 +14,10 @@
# under the License.
from tempest.api.compute import base
+from tempest.common import utils
+from tempest import config
+
+CONF = config.CONF
class BaseFloatingIPsTest(base.BaseV2ComputeTest):
@@ -24,3 +28,17 @@
cls.set_network_resources(network=True, subnet=True,
router=True, dhcp=True)
super(BaseFloatingIPsTest, cls).setup_credentials()
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseFloatingIPsTest, cls).skip_checks()
+ if not utils.get_service_list()['network']:
+ raise cls.skipException("network service not enabled.")
+ if not CONF.network_feature_enabled.floating_ips:
+ raise cls.skipException("Floating ips are not available")
+
+ @classmethod
+ def setup_clients(cls):
+ super(BaseFloatingIPsTest, cls).setup_clients()
+ cls.client = cls.floating_ips_client
+ cls.pools_client = cls.floating_ip_pools_client
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 8938570..2adc482 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -16,7 +16,6 @@
import testtools
from tempest.api.compute.floating_ips import base
-from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -27,32 +26,7 @@
class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPsTestJSON, cls).skip_checks()
- if not utils.get_service_list()['network']:
- raise cls.skipException("network service not enabled.")
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPsTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
-
- @classmethod
- def resource_setup(cls):
- super(FloatingIPsTestJSON, cls).resource_setup()
-
- # Server creation
- server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- # Floating IP creation
- body = cls.client.create_floating_ip(
- pool=CONF.network.floating_network_name)['floating_ip']
- cls.addClassResourceCleanup(cls.client.delete_floating_ip, body['id'])
- cls.floating_ip_id = body['id']
- cls.floating_ip = body['ip']
+ max_microversion = '2.35'
@decorators.idempotent_id('f7bfb946-297e-41b8-9e8c-aba8e9bb5194')
def test_allocate_floating_ip(self):
@@ -83,6 +57,25 @@
# Check it was really deleted.
self.client.wait_for_resource_deletion(floating_ip_body['id'])
+
+class FloatingIPsAssociationTestJSON(base.BaseFloatingIPsTest):
+
+ max_microversion = '2.43'
+
+ @classmethod
+ def resource_setup(cls):
+ super(FloatingIPsAssociationTestJSON, cls).resource_setup()
+
+ # Server creation
+ cls.server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = cls.server['id']
+ # Floating IP creation
+ body = cls.client.create_floating_ip(
+ pool=CONF.network.floating_network_name)['floating_ip']
+ cls.addClassResourceCleanup(cls.client.delete_floating_ip, body['id'])
+ cls.floating_ip_id = body['id']
+ cls.floating_ip = body['ip']
+
@decorators.idempotent_id('307efa27-dc6f-48a0-8cd2-162ce3ef0b52')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
index c3d7816..9257458 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
@@ -16,7 +16,6 @@
import testtools
from tempest.api.compute.floating_ips import base
-from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -27,26 +26,12 @@
class FloatingIPsNegativeTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPsNegativeTestJSON, cls).skip_checks()
- if not utils.get_service_list()['network']:
- raise cls.skipException("network service not enabled.")
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPsNegativeTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
+ max_microversion = '2.35'
@classmethod
def resource_setup(cls):
super(FloatingIPsNegativeTestJSON, cls).resource_setup()
- # Server creation
- server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
# Generating a nonexistent floatingIP id
body = cls.client.list_floating_ips()['floating_ips']
floating_ip_ids = [floating_ip['id'] for floating_ip in body]
@@ -77,6 +62,17 @@
self.assertRaises(lib_exc.NotFound, self.client.delete_floating_ip,
self.non_exist_id)
+
+class FloatingIPsAssociationNegativeTestJSON(base.BaseFloatingIPsTest):
+
+ max_microversion = '2.43'
+
+ @classmethod
+ def resource_setup(cls):
+ super(FloatingIPsAssociationNegativeTestJSON, cls).resource_setup()
+ cls.server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = cls.server['id']
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('595fa616-1a71-4670-9614-46564ac49a4c')
def test_associate_nonexistent_floating_ip(self):
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 516c544..944f798 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -13,29 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
-from tempest.common import utils
+from tempest.api.compute.floating_ips import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
-class FloatingIPDetailsTestJSON(base.BaseV2ComputeTest):
+class FloatingIPDetailsTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPDetailsTestJSON, cls).skip_checks()
- if not utils.get_service_list()['network']:
- raise cls.skipException("network service not enabled.")
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPDetailsTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
- cls.pools_client = cls.floating_ip_pools_client
+ max_microversion = '2.35'
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
index 0ade872..d69248c 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
@@ -13,8 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
-from tempest.common import utils
+from tempest.api.compute.floating_ips import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -23,20 +22,9 @@
CONF = config.CONF
-class FloatingIPDetailsNegativeTestJSON(base.BaseV2ComputeTest):
+class FloatingIPDetailsNegativeTestJSON(base.BaseFloatingIPsTest):
- @classmethod
- def skip_checks(cls):
- super(FloatingIPDetailsNegativeTestJSON, cls).skip_checks()
- if not utils.get_service_list()['network']:
- raise cls.skipException("network service not enabled.")
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
- @classmethod
- def setup_clients(cls):
- super(FloatingIPDetailsNegativeTestJSON, cls).setup_clients()
- cls.client = cls.floating_ips_client
+ max_microversion = '2.35'
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7ab18834-4a4b-4f28-a2c5-440579866695')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index e62e25e..058e7e6 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -15,6 +15,7 @@
from tempest.api.compute import base
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -86,5 +87,9 @@
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
body = self.client.create_image(self.server_id, name=utf8_name)
- image_id = data_utils.parse_image_id(body.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
+ image_id = body['image_id']
+ else:
+ image_id = data_utils.parse_image_id(body.response['location'])
self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 7ecfa0a..a2e58c9 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -19,6 +19,7 @@
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -51,7 +52,7 @@
self._reset_server()
def _reset_server(self):
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
@classmethod
def skip_checks(cls):
@@ -105,7 +106,11 @@
self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.client.delete_image(image_id)
@decorators.attr(type=['negative'])
@@ -123,7 +128,11 @@
# Return an error while trying to delete an image what is creating
image = self.create_image_from_server(self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.addCleanup(self._reset_server)
diff --git a/tempest/api/compute/keypairs/base.py b/tempest/api/compute/keypairs/base.py
index 0051810..44da88c 100644
--- a/tempest/api/compute/keypairs/base.py
+++ b/tempest/api/compute/keypairs/base.py
@@ -20,17 +20,16 @@
class BaseKeypairTest(base.BaseV2ComputeTest):
"""Base test case class for all keypair API tests."""
- @classmethod
- def setup_clients(cls):
- super(BaseKeypairTest, cls).setup_clients()
- cls.client = cls.keypairs_client
-
- def _delete_keypair(self, keypair_name, **params):
- self.client.delete_keypair(keypair_name, **params)
+ def _delete_keypair(self, keypair_name, client=None, **params):
+ if not client:
+ client = self.keypairs_client
+ client.delete_keypair(keypair_name, **params)
def create_keypair(self, keypair_name=None,
pub_key=None, keypair_type=None,
- user_id=None):
+ user_id=None, client=None):
+ if not client:
+ client = self.keypairs_client
if keypair_name is None:
keypair_name = data_utils.rand_name(
self.__class__.__name__ + '-keypair')
@@ -43,6 +42,7 @@
if user_id:
kwargs.update({'user_id': user_id})
delete_params['user_id'] = user_id
- body = self.client.create_keypair(**kwargs)['keypair']
- self.addCleanup(self._delete_keypair, keypair_name, **delete_params)
+ body = client.create_keypair(**kwargs)['keypair']
+ self.addCleanup(self._delete_keypair, keypair_name,
+ client, **delete_params)
return body
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 3a54d51..66abb21 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -35,7 +35,7 @@
key_list.append(keypair)
# Fetch all keypairs and verify the list
# has all created keypairs
- fetched_list = self.client.list_keypairs()['keypairs']
+ fetched_list = self.keypairs_client.list_keypairs()['keypairs']
new_list = list()
for keypair in fetched_list:
new_list.append(keypair['keypair'])
@@ -61,7 +61,7 @@
# Keypair should be created, Got details by name and deleted
k_name = data_utils.rand_name('keypair')
self.create_keypair(k_name)
- keypair_detail = self.client.show_keypair(k_name)['keypair']
+ keypair_detail = self.keypairs_client.show_keypair(k_name)['keypair']
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
diff --git a/tempest/api/compute/keypairs/test_keypairs_negative.py b/tempest/api/compute/keypairs/test_keypairs_negative.py
index 205076c..f9050a8 100644
--- a/tempest/api/compute/keypairs/test_keypairs_negative.py
+++ b/tempest/api/compute/keypairs/test_keypairs_negative.py
@@ -34,7 +34,8 @@
def test_keypair_delete_nonexistent_key(self):
# Non-existent key deletion should throw a proper error
k_name = data_utils.rand_name("keypair-non-existent")
- self.assertRaises(lib_exc.NotFound, self.client.delete_keypair,
+ self.assertRaises(lib_exc.NotFound,
+ self.keypairs_client.delete_keypair,
k_name)
@decorators.attr(type=['negative'])
@@ -58,11 +59,11 @@
def test_create_keypair_with_duplicate_name(self):
# Keypairs with duplicate names should not be created
k_name = data_utils.rand_name('keypair')
- self.client.create_keypair(name=k_name)
+ self.keypairs_client.create_keypair(name=k_name)
# Now try the same keyname to create another key
self.assertRaises(lib_exc.Conflict, self.create_keypair,
k_name)
- self.client.delete_keypair(k_name)
+ self.keypairs_client.delete_keypair(k_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1398abe1-4a84-45fb-9294-89f514daff00')
diff --git a/tempest/api/compute/keypairs/test_keypairs_v22.py b/tempest/api/compute/keypairs/test_keypairs_v22.py
index f39bb12..1aff262 100644
--- a/tempest/api/compute/keypairs/test_keypairs_v22.py
+++ b/tempest/api/compute/keypairs/test_keypairs_v22.py
@@ -32,9 +32,9 @@
# Verify that 'type' is present in the keypair create response of
# version 2.2 and that it has the default value 'ssh'.
self._check_keypair_type(keypair, keypair_type)
- keypair_detail = self.client.show_keypair(k_name)['keypair']
+ keypair_detail = self.keypairs_client.show_keypair(k_name)['keypair']
self._check_keypair_type(keypair_detail, keypair_type)
- fetched_list = self.client.list_keypairs()['keypairs']
+ fetched_list = self.keypairs_client.list_keypairs()['keypairs']
for keypair in fetched_list:
# Verify that 'type' is present in the keypair list response of
# version 2.2 and that it has the default value 'ssh'.
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0248c65..0e8f681 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -78,8 +78,11 @@
return port
- def _check_interface(self, iface, port_id=None, network_id=None,
- fixed_ip=None, mac_addr=None):
+ def _check_interface(self, iface, server_id=None, port_id=None,
+ network_id=None, fixed_ip=None, mac_addr=None):
+ if server_id:
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server_id, iface['port_id'], 'ACTIVE')
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
@@ -109,9 +112,8 @@
network_id = ifs[0]['net_id']
iface = self.interfaces_client.create_interface(
server['id'], net_id=network_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, network_id=network_id)
+ self._check_interface(iface, server_id=server['id'],
+ network_id=network_id)
return iface
def _test_create_interface_by_port_id(self, server, ifs):
@@ -121,9 +123,8 @@
self.addCleanup(self.ports_client.delete_port, port_id)
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'], port_id=port_id,
+ network_id=network_id)
return iface
def _test_create_interface_by_fixed_ips(self, server, ifs):
@@ -140,9 +141,8 @@
server['id'], net_id=network_id,
fixed_ips=fixed_ips)['interfaceAttachment']
self.addCleanup(self.ports_client.delete_port, iface['port_id'])
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, fixed_ip=ip_list[0])
+ self._check_interface(iface, server_id=server['id'],
+ fixed_ip=ip_list[0])
return iface
def _test_show_interface(self, server, ifs):
@@ -271,7 +271,8 @@
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'],
+ port_id=port_id)
# detach the port from the server; this is a cast in the compute
# API so we have to poll the port until the device_id is unset.
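The change above folds the ACTIVE wait into _check_interface so every attach path polls the interface before asserting on it. The waiter it delegates to is essentially a poll-until-status loop; a generic, self-contained sketch of that idea (this is not tempest's waiters module, just an illustration):

    import time

    def wait_for_status(fetch, wanted, timeout=60, interval=1):
        """Poll fetch() until it returns a dict whose 'status' equals wanted."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            resource = fetch()
            if resource.get('status') == wanted:
                return resource
            time.sleep(interval)
        raise TimeoutError('resource did not reach status %r within %s seconds'
                           % (wanted, timeout))

    # Usage with a stub that becomes ACTIVE on the third poll:
    states = iter(['DOWN', 'BUILD', 'ACTIVE'])
    print(wait_for_status(lambda: {'status': next(states)}, 'ACTIVE', interval=0))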
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index a126fd6..d3b1350 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -82,7 +82,9 @@
# A hypervisor may present multiple paths to a tagged disk, so
# there may be duplicated tags in the metadata, use set() to
# remove duplicated tags.
- found_devices = [d['tags'][0] for d in md_dict['devices']]
+ # Some hypervisors might report devices with no tags as well.
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
self.assertEqual(set(found_devices), set(['port-1', 'port-2',
'net-1', 'net-2-100',
'net-2-200', 'boot',
@@ -139,6 +141,7 @@
server = self.create_test_server(
validatable=True,
+ wait_until='ACTIVE',
validation_resources=validation_resources,
config_drive=config_drive_enabled,
adminPass=admin_pass,
@@ -205,6 +208,7 @@
self.addCleanup(self.delete_server, server['id'])
+ server = self.servers_client.show_server(server['id'])['server']
self.ssh_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
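The new list-comprehension guard simply skips metadata entries that carry no tags before collecting tag names. A short runnable illustration with sample metadata (the device entries are made up for the example):

    md_dict = {'devices': [
        {'type': 'disk', 'tags': ['boot']},
        {'type': 'disk', 'tags': ['boot']},   # second path to the same tagged disk
        {'type': 'nic'},                      # untagged device, now ignored
        {'type': 'nic', 'tags': ['port-1']},
    ]}
    found_devices = [d['tags'][0] for d in md_dict['devices'] if d.get('tags')]
    assert set(found_devices) == {'boot', 'port-1'}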
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 6c9b287..393e68f 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -119,8 +119,12 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('74745ad8-b346-45b5-b9b8-509d7447fc1f')
def test_list_servers_by_changes_since_future_date(self):
- # Return an empty list when a date in the future is passed
- changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
+ # Return an empty list when a date in the future is passed.
+ # The updated_at field may not have been set yet at the point in the
+ # boot process where the build_request still exists, so add
+ # {'status': 'ACTIVE'} along with changes-since as a filter.
+ changes_since = {'changes-since': '2051-01-01T12:34:00Z',
+ 'status': 'ACTIVE'}
body = self.client.list_servers(**changes_since)
self.assertEmpty(body['servers'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 4cfc665..5c3e9f0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -55,7 +55,7 @@
self.__class__.server_id = server['id']
except Exception:
# Rebuild server if something happened to it during a test
- self.__class__.server_id = self.rebuild_server(
+ self.__class__.server_id = self.recreate_server(
self.server_id, validatable=True)
def tearDown(self):
@@ -75,7 +75,7 @@
@classmethod
def resource_setup(cls):
super(ServerActionsTestJSON, cls).resource_setup()
- cls.server_id = cls.rebuild_server(None, validatable=True)
+ cls.server_id = cls.recreate_server(None, validatable=True)
@decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
@@ -281,45 +281,73 @@
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
- def _test_resize_server_confirm(self, stop=False):
+ def _test_resize_server_confirm(self, server_id, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
if stop:
- self.client.stop_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
+ self.client.stop_server(server_id)
+ waiters.wait_for_server_status(self.client, server_id,
'SHUTOFF')
- self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ self.client.resize_server(server_id, self.flavor_ref_alt)
# NOTE(jlk): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
- self.addCleanup(self.delete_server, self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
+ self.addCleanup(self.delete_server, server_id)
+ waiters.wait_for_server_status(self.client, server_id,
'VERIFY_RESIZE')
- self.client.confirm_resize_server(self.server_id)
+ self.client.confirm_resize_server(server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
- waiters.wait_for_server_status(self.client, self.server_id,
+ waiters.wait_for_server_status(self.client, server_id,
expected_status)
- server = self.client.show_server(self.server_id)['server']
+ server = self.client.show_server(server_id)['server']
self.assertEqual(self.flavor_ref_alt, server['flavor']['id'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
- self.client.start_server(self.server_id)
+ self.client.start_server(server_id)
@decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm(self):
- self._test_resize_server_confirm(stop=False)
+ self._test_resize_server_confirm(self.server_id, stop=False)
+
+ @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
+ @decorators.related_bug('1728603')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @utils.services('volume')
+ def test_resize_volume_backed_server_confirm(self):
+ # We have to create a new server that is volume-backed since the one
+ # from setUp is not volume-backed.
+ server = self.create_test_server(
+ volume_backed=True, wait_until='ACTIVE')
+ self._test_resize_server_confirm(server['id'])
+ if CONF.compute_feature_enabled.console_output:
+ # Now do something interactive with the guest like get its console
+ # output; we don't actually care about the output,
+ # just that it doesn't raise an error.
+ self.client.get_console_output(server['id'])
+ if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.ssh_user,
+ password=None,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.client)
+ linux_client.validate_authentication()
@decorators.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm_from_stopped(self):
- self._test_resize_server_confirm(stop=True)
+ self._test_resize_server_confirm(self.server_id, stop=True)
@decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -554,6 +582,12 @@
compute.shelve_server(self.client, self.server_id,
force_shelve_offload=True)
+ def _unshelve_server():
+ server_info = self.client.show_server(self.server_id)['server']
+ if 'SHELVED' in server_info['status']:
+ self.client.unshelve_server(self.server_id)
+ self.addOnException(_unshelve_server)
+
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
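The _unshelve_server hook added above only acts when the server is still in a SHELVED or SHELVED_OFFLOADED state, so a failed test does not leave the shared class-level server shelved for later tests; it is registered with addOnException so it runs only when the test raises. The guard itself is a plain status check; a stand-alone sketch with a stub client (names are illustrative):

    class StubServersClient(object):
        def __init__(self, status):
            self.status = status
            self.unshelve_called = False

        def show_server(self, server_id):
            return {'server': {'id': server_id, 'status': self.status}}

        def unshelve_server(self, server_id):
            self.unshelve_called = True

    def unshelve_if_needed(client, server_id):
        server = client.show_server(server_id)['server']
        # 'SHELVED' matches both SHELVED and SHELVED_OFFLOADED.
        if 'SHELVED' in server['status']:
            client.unshelve_server(server_id)

    shelved = StubServersClient('SHELVED_OFFLOADED')
    active = StubServersClient('ACTIVE')
    unshelve_if_needed(shelved, 'server-1')
    unshelve_if_needed(active, 'server-2')
    assert shelved.unshelve_called and not active.unshelve_called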
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index e7591a5..e6a668a 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -21,19 +21,14 @@
class ServerPasswordTestJSON(base.BaseV2ComputeTest):
@classmethod
- def setup_clients(cls):
- super(ServerPasswordTestJSON, cls).setup_clients()
- cls.client = cls.servers_client
-
- @classmethod
def resource_setup(cls):
super(ServerPasswordTestJSON, cls).resource_setup()
cls.server = cls.create_test_server(wait_until="ACTIVE")
@decorators.idempotent_id('f83b582f-62a8-4f22-85b0-0dee50ff783a')
def test_get_server_password(self):
- self.client.show_password(self.server['id'])
+ self.servers_client.show_password(self.server['id'])
@decorators.idempotent_id('f8229e8b-b625-4493-800a-bde86ac611ea')
def test_delete_server_password(self):
- self.client.delete_password(self.server['id'])
+ self.servers_client.delete_password(self.server['id'])
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 2f0f5ee..6f32b46 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -44,7 +44,6 @@
def setup_clients(cls):
super(ServerPersonalityTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- cls.user_client = cls.limits_client
@decorators.idempotent_id('3cfe87fd-115b-4a02-b942-7dc36a337fdf')
def test_create_server_with_personality(self):
@@ -104,7 +103,7 @@
# number of files are injected into the server.
file_contents = 'This is a test file.'
personality = []
- limits = self.user_client.show_limits()['limits']
+ limits = self.limits_client.show_limits()['limits']
max_file_limit = limits['absolute']['maxPersonality']
if max_file_limit == -1:
raise self.skipException("No limit for personality files")
@@ -123,7 +122,7 @@
# Server should be created successfully if maximum allowed number of
# files is injected into the server during creation.
file_contents = 'This is a test file.'
- limits = self.user_client.show_limits()['limits']
+ limits = self.limits_client.show_limits()['limits']
max_file_limit = limits['absolute']['maxPersonality']
if max_file_limit == -1:
raise self.skipException("No limit for personality files")
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index c9ee671..2904976 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -167,6 +167,18 @@
server = self.client.show_server(server['id'])['server']
self.assertEqual('2001:2001::3', server['accessIPv6'])
+ @decorators.related_bug('1730756')
+ @decorators.idempotent_id('defbaca5-d611-49f5-ae21-56ee25d2db49')
+ def test_create_server_specify_multibyte_character_name(self):
+ # prefix character is:
+ # http://unicode.org/cldr/utility/character.jsp?a=20A1
+
+ # We use a string with a 3-byte utf-8 character because nova
+ # returns 400 (Bad Request) if we attempt to send a name that
+ # contains a 4-byte utf-8 character.
+ utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
+ self.create_test_server(name=utf8_name, wait_until='ACTIVE')
+
class ServerShowV247Test(base.BaseV2ComputeTest):
min_microversion = '2.47'
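The multibyte-name test builds its server name around a 3-byte UTF-8 character (U+20A1, per the URL in the comment); names containing 4-byte UTF-8 characters are rejected by nova with a 400. A quick runnable check of the prefix construction used above (rand_name() would append a random suffix to it):

    prefix = b'\xe2\x82\xa1'.decode('utf-8')     # U+20A1 COLON SIGN
    assert len(prefix) == 1 and len(prefix.encode('utf-8')) == 3
    utf8_name = '%s-server' % prefix             # stand-in for data_utils.rand_name
    print(utf8_name)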
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 8170b28..9b545af 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -37,7 +37,7 @@
waiters.wait_for_server_status(self.client, self.server_id,
'ACTIVE')
except Exception:
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
@@ -477,6 +477,12 @@
# shelve a shelved server.
compute.shelve_server(self.client, self.server_id)
+ def _unshelve_server():
+ server_info = self.client.show_server(self.server_id)['server']
+ if 'SHELVED' in server_info['status']:
+ self.client.unshelve_server(self.server_id)
+ self.addOnException(_unshelve_server)
+
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
@@ -551,7 +557,7 @@
waiters.wait_for_server_status(self.servers_client, self.server_id,
'ACTIVE')
except Exception:
- self.__class__.server_id = self.rebuild_server(self.server_id)
+ self.__class__.server_id = self.recreate_server(self.server_id)
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index 20923a8..c4e2400 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -28,11 +28,6 @@
cls.set_network_resources()
super(VirtualInterfacesNegativeTestJSON, cls).setup_credentials()
- @classmethod
- def setup_clients(cls):
- super(VirtualInterfacesNegativeTestJSON, cls).setup_clients()
- cls.client = cls.servers_client
-
@decorators.attr(type=['negative'])
@decorators.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
@utils.services('network')
@@ -41,5 +36,5 @@
# for an invalid server_id
invalid_server_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
- self.client.list_virtual_interfaces,
+ self.servers_client.list_virtual_interfaces,
invalid_server_id)
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 9bef80f..caa445d 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -13,8 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import compute
+from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
@@ -23,12 +26,12 @@
CONF = config.CONF
-class AttachVolumeTestJSON(base.BaseV2ComputeTest):
- max_microversion = '2.19'
+class BaseAttachVolumeTest(base.BaseV2ComputeTest):
+ """Base class for the attach volume tests in this module."""
@classmethod
def skip_checks(cls):
- super(AttachVolumeTestJSON, cls).skip_checks()
+ super(BaseAttachVolumeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@@ -36,11 +39,11 @@
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
- super(AttachVolumeTestJSON, cls).setup_credentials()
+ super(BaseAttachVolumeTest, cls).setup_credentials()
@classmethod
def resource_setup(cls):
- super(AttachVolumeTestJSON, cls).resource_setup()
+ super(BaseAttachVolumeTest, cls).resource_setup()
cls.device = CONF.compute.volume_device_name
def _create_server(self):
@@ -58,6 +61,9 @@
server['id'])['addresses']
return server, validation_resources
+
+class AttachVolumeTestJSON(BaseAttachVolumeTest):
+
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
@@ -149,7 +155,7 @@
self.volumes_client, attachment['volumeId'], 'available')
-class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
+class AttachVolumeShelveTestJSON(BaseAttachVolumeTest):
"""Testing volume with shelved instance.
This test checks the attaching and detaching volumes from
@@ -223,8 +229,7 @@
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device),
- check_reserved=True)
+ device=('/dev/%s' % self.device))
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(
@@ -250,8 +255,7 @@
self._shelve_server(server, validation_resources)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device),
- check_reserved=True)
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device))
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
@@ -260,3 +264,177 @@
# volume(s)
self._unshelve_server_and_check_volumes(
server, validation_resources, num_vol)
+
+
+class AttachVolumeMultiAttachTest(BaseAttachVolumeTest):
+ min_microversion = '2.60'
+ max_microversion = 'latest'
+
+ @classmethod
+ def skip_checks(cls):
+ super(AttachVolumeMultiAttachTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.volume_multiattach:
+ raise cls.skipException('Volume multi-attach is not available.')
+
+ def _attach_volume_to_servers(self, volume, servers):
+ """Attaches the given volume to the list of servers.
+
+ :param volume: The multiattach volume to use.
+ :param servers: list of server instances on which the volume will be
+ attached
+ :returns: dict of server ID to volumeAttachment dict entries
+ """
+ attachments = {}
+ for server in servers:
+ # map the server id to the volume attachment
+ attachments[server['id']] = self.attach_volume(server, volume)
+ # NOTE(mriedem): In the case of multi-attach, after the first
+ # attach the volume will be in-use. On the second attach, nova will
+ # 'reserve' the volume which puts it back into 'attaching' status
+ # and then the volume shouldn't go back to in-use until the compute
+ # actually attaches the server to the volume.
+ return attachments
+
+ def _detach_multiattach_volume(self, volume_id, server_id):
+ """Detaches a multiattach volume from the given server.
+
+ Depending on the number of attachments the volume has, this method
+ will wait for the volume to go back to 'in-use' status if there are
+ more attachments, or to 'available' status if there are none left.
+ """
+ # Count the number of attachments before starting the detach.
+ volume = self.volumes_client.show_volume(volume_id)['volume']
+ attachments = volume['attachments']
+ wait_status = 'in-use' if len(attachments) > 1 else 'available'
+ # Now detach the volume from the given server.
+ self.servers_client.detach_volume(server_id, volume_id)
+ # Now wait for the volume status to change.
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume_id, wait_status)
+
+ def _create_multiattach_volume(self, bootable=False):
+ kwargs = {}
+ if bootable:
+ kwargs['image_ref'] = CONF.compute.image_ref
+ return self.create_volume(multiattach=True, **kwargs)
+
+ def _create_and_multiattach(self):
+ """Creates two server instances and a volume and attaches to both.
+
+ :returns: A three-item tuple of the list of created servers,
+ the created volume, and dict of server ID to volumeAttachment
+ dict entries
+ """
+ servers = []
+ for x in range(2):
+ name = 'multiattach-server-%i' % x
+ servers.append(self.create_test_server(name=name))
+
+ # Now wait for the servers to be ACTIVE.
+ for server in servers:
+ waiters.wait_for_server_status(self.servers_client, server['id'],
+ 'ACTIVE')
+
+ volume = self._create_multiattach_volume()
+
+ # Attach the volume to the servers
+ attachments = self._attach_volume_to_servers(volume, servers)
+ return servers, volume, attachments
+
+ @decorators.idempotent_id('8d5853f7-56e7-4988-9b0c-48cea3c7049a')
+ def test_list_get_volume_attachments_multiattach(self):
+ # Attach a single volume to two servers.
+ servers, volume, attachments = self._create_and_multiattach()
+
+ # List attachments from the volume and make sure the server uuids
+ # are in that list.
+ vol_attachments = self.volumes_client.show_volume(
+ volume['id'])['volume']['attachments']
+ attached_server_ids = [attachment['server_id']
+ for attachment in vol_attachments]
+ self.assertEqual(2, len(attached_server_ids))
+
+ # List Volume attachment of the servers
+ for server in servers:
+ self.assertIn(server['id'], attached_server_ids)
+ vol_attachments = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(1, len(vol_attachments))
+ attachment = attachments[server['id']]
+ self.assertDictEqual(attachment, vol_attachments[0])
+ # Detach the volume from this server.
+ self._detach_multiattach_volume(volume['id'], server['id'])
+
+ def _boot_from_multiattach_volume(self):
+ """Boots a server from a multiattach volume.
+
+ The volume will not be deleted when the server is deleted.
+
+ :returns: 2-item tuple of (server, volume)
+ """
+ volume = self._create_multiattach_volume(bootable=True)
+ # Now create a server from the bootable volume.
+ bdm = [{
+ 'uuid': volume['id'],
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': False}]
+ server = self.create_test_server(
+ image_id='', block_device_mapping_v2=bdm, wait_until='ACTIVE')
+ # Assert the volume is attached to the server.
+ attachments = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(1, len(attachments))
+ self.assertEqual(volume['id'], attachments[0]['volumeId'])
+ return server, volume
+
+ @decorators.idempotent_id('65e33aa2-185b-44c8-b22e-e524973ed625')
+ def test_boot_from_multiattach_volume(self):
+ """Simple test to boot an instance from a multiattach volume."""
+ self._boot_from_multiattach_volume()
+
+ @utils.services('image')
+ @decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
+ def test_snapshot_volume_backed_multiattach(self):
+ """Boots a server from a multiattach volume and snapshots the server.
+
+ Creating the snapshot of the server will also create a snapshot of
+ the volume.
+ """
+ server, volume = self._boot_from_multiattach_volume()
+ # Create a snapshot of the server (and volume implicitly).
+ self.create_image_from_server(
+ server['id'], name='multiattach-snapshot',
+ wait_until='active', wait_for_server=True)
+ # TODO(mriedem): Make sure the volume snapshot exists. This requires
+ # adding the volume snapshots client to BaseV2ComputeTest.
+ # Delete the server, wait for it to be gone, and make sure the volume
+ # still exists.
+ self.servers_client.delete_server(server['id'])
+ waiters.wait_for_server_termination(self.servers_client, server['id'])
+ # Delete the volume and cascade the delete of the volume snapshot.
+ self.volumes_client.delete_volume(volume['id'], cascade=True)
+ # Now we have to wait for the volume to be gone otherwise the normal
+ # teardown will fail since it will race with our call and the snapshot
+ # might still exist.
+ self.volumes_client.wait_for_resource_deletion(volume['id'])
+
+ @decorators.idempotent_id('f01c7169-a124-4fc7-ae60-5e380e247c9c')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ def test_resize_server_with_multiattached_volume(self):
+ # Attach a single volume to multiple servers, then resize the servers
+ servers, volume, _ = self._create_and_multiattach()
+
+ for server in servers:
+ self.resize_server(server['id'], self.flavor_ref_alt)
+
+ for server in servers:
+ self._detach_multiattach_volume(volume['id'], server['id'])
+
+ # TODO(mriedem): Might be interesting to create a bootable multiattach
+ # volume with delete_on_termination=True, create server1 from the
+ # volume, then attach it to server2, and then delete server1 in which
+ # case the volume won't be deleted because it's still attached to
+ # server2 and make sure the volume is still attached to server2.
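The detach helper above derives the volume status to wait for from how many attachments remain: with more than one attachment the multiattach volume should settle back to 'in-use', with a single attachment it should become 'available'. That decision is a one-liner; a runnable illustration:

    def status_after_detach(attachments):
        """Status a multiattach volume should reach once one attachment goes."""
        return 'in-use' if len(attachments) > 1 else 'available'

    assert status_after_detach([{'server_id': 'a'}, {'server_id': 'b'}]) == 'in-use'
    assert status_after_detach([{'server_id': 'a'}]) == 'available'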
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index eabb907..7a74869 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -41,3 +41,18 @@
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('aab919e2-d992-4cbb-a4ed-745c2475398c')
+ def test_attach_attached_volume_to_same_server(self):
+ # Test attaching the same volume to the same instance once
+ # it's already attached. The nova/cinder validation for this differs
+ # depending on whether or not cinder v3.27 is being used to attach
+ # the volume to the instance.
+ server = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ self.attach_volume(server, volume)
+
+ self.assertRaises(lib_exc.BadRequest,
+ self.attach_volume, server, volume)
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index 59fc4d8..947706e 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -23,15 +23,15 @@
@classmethod
def resource_setup(cls):
super(EndPointsTestJSON, cls).resource_setup()
- cls.service_ids = list()
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
service_data = cls.services_client.create_service(
name=s_name, type=s_type,
description=s_description)['OS-KSADM:service']
+ cls.addClassResourceCleanup(cls.services_client.delete_service,
+ service_data['id'])
cls.service_id = service_data['id']
- cls.service_ids.append(cls.service_id)
# Create endpoints so as to use for LIST and GET test cases
cls.setup_endpoints = list()
for _ in range(2):
@@ -43,18 +43,12 @@
publicurl=url,
adminurl=url,
internalurl=url)['endpoint']
+ cls.addClassResourceCleanup(cls.endpoints_client.delete_endpoint,
+ endpoint['id'])
# list_endpoints() will return 'enabled' field
endpoint['enabled'] = True
cls.setup_endpoints.append(endpoint)
- @classmethod
- def resource_cleanup(cls):
- for e in cls.setup_endpoints:
- cls.endpoints_client.delete_endpoint(e['id'])
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
def test_list_endpoints(self):
# Get a list of endpoints
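This file, like most of the identity changes that follow, replaces a hand-written resource_cleanup() override with per-resource cleanups registered immediately after each create call, so a failure partway through resource_setup no longer leaks the resources that were already created. tempest's addClassResourceCleanup runs its handlers in reverse (LIFO) order, much like the standard library's addClassCleanup. A stand-alone sketch of the pattern using stdlib unittest (assumes Python 3.8+ for addClassCleanup; the create/delete helpers are stand-ins for the service and endpoint clients):

    import unittest

    CREATED = []

    def create(name):
        CREATED.append(name)
        return name

    def delete(name):
        # Tolerating an already-deleted resource mirrors
        # test_utils.call_and_ignore_notfound_exc.
        if name in CREATED:
            CREATED.remove(name)

    class PerResourceCleanupExample(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            super().setUpClass()
            service = create('service')
            cls.addClassCleanup(delete, service)
            for i in range(2):
                endpoint = create('endpoint-%d' % i)
                # Registered right after creation: no bulk cleanup override is
                # needed, and endpoints are deleted before the service (LIFO).
                cls.addClassCleanup(delete, endpoint)

        def test_resources_exist(self):
            self.assertEqual(3, len(CREATED))

    if __name__ == '__main__':
        unittest.main()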
diff --git a/tempest/api/identity/admin/v2/test_roles.py b/tempest/api/identity/admin/v2/test_roles.py
index 124bb5f..9736a76 100644
--- a/tempest/api/identity/admin/v2/test_roles.py
+++ b/tempest/api/identity/admin/v2/test_roles.py
@@ -28,14 +28,11 @@
for _ in range(5):
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.roles_client.delete_role, role['id'])
cls.roles.append(role)
- @classmethod
- def resource_cleanup(cls):
- super(RolesTestJSON, cls).resource_cleanup()
- for role in cls.roles:
- cls.roles_client.delete_role(role['id'])
-
def _get_role_params(self):
user = self.setup_test_user()
tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
diff --git a/tempest/api/identity/admin/v2/test_tokens.py b/tempest/api/identity/admin/v2/test_tokens.py
index 6b30d23..6ce1a8b 100644
--- a/tempest/api/identity/admin/v2/test_tokens.py
+++ b/tempest/api/identity/admin/v2/test_tokens.py
@@ -112,6 +112,8 @@
@decorators.idempotent_id('ca3ea6f7-ed08-4a61-adbd-96906456ad31')
def test_list_endpoints_for_token(self):
+ tempest_services = ['keystone', 'nova', 'neutron', 'swift',
+ 'cinder']
# get a token for the user
creds = self.os_primary.credentials
username = creds.username
@@ -125,9 +127,10 @@
self.assertIsInstance(endpoints, list)
# Store list of service names
service_names = [e['name'] for e in endpoints]
- # Get the list of available services.
+ # Get the list of available services. Keystone is always available.
available_services = [s[0] for s in list(
- CONF.service_available.items()) if s[1] is True]
+ CONF.service_available.items()) if s[1] is True] + ['keystone']
# Verify that all available services are present.
- for service in available_services:
- self.assertIn(service, service_names)
+ for service in tempest_services:
+ if service in available_services:
+ self.assertIn(service, service_names)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 15b2008..ba19ff7 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -32,21 +32,18 @@
u_email = '%s@testmail.tm' % u_name
u_password = data_utils.rand_password()
for _ in range(2):
- cls.project = cls.projects_client.create_project(
+ project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'))['project']
- cls.projects.append(cls.project['id'])
+ cls.addClassResourceCleanup(
+ cls.projects_client.delete_project, project['id'])
+ cls.projects.append(project['id'])
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.users_client.delete_user(cls.user_body['id'])
- for p in cls.projects:
- cls.projects_client.delete_project(p)
- super(CredentialsTestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(
+ cls.users_client.delete_user, cls.user_body['id'])
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index f731697..c4e0622 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -37,18 +37,6 @@
super(DomainConfigurationTestJSON, cls).setup_clients()
cls.client = cls.domain_config_client
- @classmethod
- def resource_setup(cls):
- super(DomainConfigurationTestJSON, cls).resource_setup()
- cls.group = cls.groups_client.create_group(
- name=data_utils.rand_name('group'),
- description=data_utils.rand_name('group-desc'))['group']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- super(DomainConfigurationTestJSON, cls).resource_cleanup()
-
def _create_domain_and_config(self, config):
domain = self.setup_test_domain()
config = self.client.create_domain_config(domain['id'], **config)[
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 8a39a44..97a1f36 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -35,19 +35,6 @@
domain = cls.create_domain(enabled=i < 2)
cls.setup_domains.append(domain)
- @classmethod
- def resource_cleanup(cls):
- for domain in cls.setup_domains:
- cls._delete_domain(domain['id'])
- super(DomainsTestJSON, cls).resource_cleanup()
-
- @classmethod
- def _delete_domain(cls, domain_id):
- # It is necessary to disable the domain before deleting,
- # or else it would result in unauthorized error
- cls.domains_client.update_domain(domain_id, enabled=False)
- cls.domains_client.delete_domain(domain_id)
-
@decorators.idempotent_id('8cf516ef-2114-48f1-907b-d32726c734d4')
def test_list_domains(self):
# Test to list domains
@@ -93,7 +80,7 @@
domain = self.domains_client.create_domain(
name=d_name, description=d_desc)['domain']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_domain, domain['id'])
+ self.delete_domain, domain['id'])
self.assertIn('description', domain)
self.assertIn('name', domain)
self.assertIn('enabled', domain)
@@ -166,7 +153,7 @@
# Create domain only with name
d_name = data_utils.rand_name('domain')
domain = self.domains_client.create_domain(name=d_name)['domain']
- self.addCleanup(self._delete_domain, domain['id'])
+ self.addCleanup(self.delete_domain, domain['id'])
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
self.assertDictContainsSubset(expected_data, domain)
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 49dbba1..eef93c2 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -28,11 +29,12 @@
@classmethod
def resource_setup(cls):
super(EndPointGroupsTest, cls).resource_setup()
- cls.service_ids = list()
cls.endpoint_groups = list()
# Create endpoint group so as to use it for LIST test
service_id = cls._create_service()
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
@@ -42,18 +44,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint_group, endpoint_group['id'])
cls.endpoint_groups.append(endpoint_group)
@classmethod
- def resource_cleanup(cls):
- for e in cls.endpoint_groups:
- cls.client.delete_endpoint_group(e['id'])
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointGroupsTest, cls).resource_cleanup()
-
- @classmethod
def _create_service(cls):
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
@@ -64,7 +60,6 @@
description=s_description))
service_id = service_data['service']['id']
- cls.service_ids.append(service_id)
return service_id
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
@@ -78,6 +73,9 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint_group, endpoint_group['id'])
self.endpoint_groups.append(endpoint_group)
@@ -115,7 +113,6 @@
# Deleting the endpoint group created in this method
self.client.delete_endpoint_group(endpoint_group['id'])
- self.endpoint_groups.remove(endpoint_group)
# Checking whether endpoint group is deleted successfully
fetched_endpoints = \
@@ -136,10 +133,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
- self.endpoint_groups.append(endpoint_group)
+ self.addCleanup(self.client.delete_endpoint_group,
+ endpoint_group['id'])
# Creating new attr values to update endpoint group
service2_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service2_id)
name2 = data_utils.rand_name('service_group2')
description2 = data_utils.rand_name('description2')
filters = {'service_id': service2_id}
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 5d48f68..874aaa4 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -34,12 +35,18 @@
interfaces = ['public', 'internal']
cls.setup_endpoint_ids = list()
for i in range(2):
- cls._create_service()
+ service = cls._create_service()
+ cls.service_ids.append(service['id'])
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service['id'])
+
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
url=url, region=region, enabled=True)['endpoint']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@classmethod
@@ -53,17 +60,7 @@
service_data = (
cls.services_client.create_service(name=s_name, type=s_type,
description=s_description))
- service = service_data['service']
- cls.service_ids.append(service['id'])
- return service
-
- @classmethod
- def resource_cleanup(cls):
- for e in cls.setup_endpoint_ids:
- cls.client.delete_endpoint(e)
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointsTestJSON, cls).resource_cleanup()
+ return service_data['service']
@decorators.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
def test_list_endpoints(self):
@@ -114,8 +111,8 @@
interface=interface,
url=url, region=region,
enabled=True)['endpoint']
-
- self.setup_endpoint_ids.append(endpoint['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
@@ -137,7 +134,6 @@
# Deleting the endpoint created in this method
self.client.delete_endpoint(endpoint['id'])
- self.setup_endpoint_ids.remove(endpoint['id'])
# Checking whether endpoint is deleted successfully
fetched_endpoints = self.client.list_endpoints()['endpoints']
@@ -147,8 +143,20 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('37e8f15e-ee7c-4657-a1e7-f6b61e375eff')
def test_update_endpoint(self):
- # Creating an endpoint so as to check update endpoint
- # with new values
+ # NOTE(zhufl) Service2 should be created before endpoint_for_update
+ # so that, with LIFO cleanups, Service2 is deleted after
+ # endpoint_for_update; otherwise deleting endpoint_for_update would
+ # fail with a 404 because its service had already been deleted.
+
+ # Creating service for updating endpoint with new service ID
+ s_name = data_utils.rand_name('service')
+ s_type = data_utils.rand_name('type')
+ s_description = data_utils.rand_name('description')
+ service2 = self._create_service(s_name=s_name, s_type=s_type,
+ s_description=s_description)
+ self.addCleanup(self.services_client.delete_service, service2['id'])
+
+ # Creating an endpoint so as to check update endpoint with new values
region1 = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
@@ -158,12 +166,7 @@
url=url1, region=region1,
enabled=True)['endpoint'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
- # Creating service so as update endpoint with new service ID
- s_name = data_utils.rand_name('service')
- s_type = data_utils.rand_name('type')
- s_description = data_utils.rand_name('description')
- service2 = self._create_service(s_name=s_name, s_type=s_type,
- s_description=s_description)
+
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
url2 = data_utils.rand_url()
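The NOTE above depends on cleanups running last-in first-out: service2 is created (and its addCleanup registered) before endpoint_for_update, so at teardown the endpoint is deleted before the service it references. A minimal script demonstrating that ordering with a home-made cleanup stack (addCleanup behaves the same way):

    deleted = []
    cleanups = []

    def add_cleanup(func, *args):
        cleanups.append((func, args))

    # Creation order: the service first, then the endpoint that references it.
    add_cleanup(deleted.append, 'service2')
    add_cleanup(deleted.append, 'endpoint_for_update')

    # Cleanups run in reverse registration order, like addCleanup's LIFO.
    for func, args in reversed(cleanups):
        func(*args)

    assert deleted == ['endpoint_for_update', 'service2']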
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 70dd7b5..d54e222 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -30,7 +30,6 @@
@classmethod
def resource_setup(cls):
super(EndpointsNegativeTestJSON, cls).resource_setup()
- cls.service_ids = list()
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
@@ -38,14 +37,10 @@
cls.services_client.create_service(name=s_name, type=s_type,
description=s_description)
['service'])
- cls.service_id = service_data['id']
- cls.service_ids.append(cls.service_id)
+ cls.addClassResourceCleanup(cls.services_client.delete_service,
+ service_data['id'])
- @classmethod
- def resource_cleanup(cls):
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndpointsNegativeTestJSON, cls).resource_cleanup()
+ cls.service_id = service_data['id']
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ac6c137e-4d3d-448f-8c83-4f13d0942651')
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 17db3ea..507810b 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -28,13 +28,6 @@
super(GroupsV3TestJSON, cls).resource_setup()
cls.domain = cls.create_domain()
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the domains created in the setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(GroupsV3TestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('2e80343b-6c81-4ac3-88c7-452f3e9d5129')
def test_group_create_update_get(self):
name = data_utils.rand_name('Group')
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 8b687cd..68c0225 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -36,22 +36,19 @@
data_utils.rand_name('project-'),
description=data_utils.rand_name('project-desc-'),
domain_id=cls.domain['id'])['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.project['id'])
cls.group = cls.groups_client.create_group(
name=data_utils.rand_name('group-'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
+ cls.addClassResourceCleanup(cls.groups_client.delete_group,
+ cls.group['id'])
cls.user = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- cls.users_client.delete_user(cls.user['id'])
- cls.projects_client.delete_project(cls.project['id'])
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(InheritsV3TestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 7e70c14..82664e8 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -27,35 +27,27 @@
# Create a domain
cls.domain = cls.create_domain()
# Create project with domain
- cls.projects = list()
cls.p1_name = data_utils.rand_name('project')
cls.p1 = cls.projects_client.create_project(
cls.p1_name, enabled=False,
domain_id=cls.domain['id'])['project']
- cls.projects.append(cls.p1)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p1['id'])
cls.project_ids.append(cls.p1['id'])
# Create default project
p2_name = data_utils.rand_name('project')
cls.p2 = cls.projects_client.create_project(p2_name)['project']
- cls.projects.append(cls.p2)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p2['id'])
cls.project_ids.append(cls.p2['id'])
# Create a new project (p3) using p2 as parent project
p3_name = data_utils.rand_name('project')
cls.p3 = cls.projects_client.create_project(
p3_name, parent_id=cls.p2['id'])['project']
- cls.projects.append(cls.p3)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p3['id'])
cls.project_ids.append(cls.p3['id'])
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the projects created during setup in inverse order
- for project in reversed(cls.projects):
- cls.projects_client.delete_project(project['id'])
- # Cleanup the domain created during setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(ListProjectsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
def test_list_projects(self):
# List projects
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 506c729..c69e4c8 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -47,24 +47,18 @@
cls.domain_enabled_user = cls.users_client.create_user(
name=u1_name, password=alt_password,
email=cls.alt_email, domain_id=cls.domain['id'])['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.domain_enabled_user['id'])
cls.users.append(cls.domain_enabled_user)
# Create default not enabled user
u2_name = data_utils.rand_name('test_user')
cls.non_domain_enabled_user = cls.users_client.create_user(
name=u2_name, password=alt_password,
email=cls.alt_email, enabled=False)['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.non_domain_enabled_user['id'])
cls.users.append(cls.non_domain_enabled_user)
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the users created during setup
- for user in cls.users:
- cls.users_client.delete_user(user['id'])
- # Cleanup the domain created during setup
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- super(UsersV3TestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
# List users with domain
diff --git a/tempest/api/identity/admin/v3/test_oauth_consumers.py b/tempest/api/identity/admin/v3/test_oauth_consumers.py
index 970ead3..062cce5 100644
--- a/tempest/api/identity/admin/v3/test_oauth_consumers.py
+++ b/tempest/api/identity/admin/v3/test_oauth_consumers.py
@@ -17,7 +17,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions as exceptions
+from tempest.lib import exceptions
class OAUTHConsumersV3Test(base.BaseIdentityV3AdminTest):
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 1b1d3f7..ac23067 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -87,7 +87,8 @@
# project and domain APIs
projects_list = self.projects_client.list_projects(
params={'is_domain': True})['projects']
- self.assertIn(project, projects_list)
+ project_ids = [p['id'] for p in projects_list]
+ self.assertIn(project['id'], project_ids)
# The domains API return different attributes for the entity, so we
# compare the entities IDs
@@ -205,3 +206,31 @@
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
+
+ @decorators.idempotent_id('d1db68b6-aebe-4fa0-b79d-d724d2e21162')
+ def test_project_get_equals_list(self):
+ fields = ['parent_id', 'is_domain', 'description', 'links',
+ 'name', 'enabled', 'domain_id', 'id', 'tags']
+
+ # Tags must be unique; the keystone API will reject duplicates
+ tags = ['a', 'c', 'b', 'd']
+
+ # Create a Project, cleanup is handled in the helper
+ project = self.setup_test_project(tags=tags)
+
+ # Show and list for the project
+ project_get = self.projects_client.show_project(
+ project['id'])['project']
+ _projects = self.projects_client.list_projects()['projects']
+ project_list = next(x for x in _projects if x['id'] == project['id'])
+
+ # Assert the list of fields is correct (one is enough to check here)
+ self.assertSetEqual(set(fields), set(project_get.keys()))
+
+ # Ensure the set of tags is identical and match the expected one
+ get_tags = set(project_get.pop("tags"))
+ self.assertSetEqual(get_tags, set(project_list.pop("tags")))
+ self.assertSetEqual(get_tags, set(tags))
+
+ # Ensure all other fields are identical
+ self.assertDictEqual(project_get, project_list)
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index d00e408..f22a528 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -34,14 +34,10 @@
r_description = data_utils.rand_name('description')
region = cls.client.create_region(
description=r_description)['region']
+ cls.addClassResourceCleanup(
+ cls.client.delete_region, region['id'])
cls.setup_regions.append(region)
- @classmethod
- def resource_cleanup(cls):
- for r in cls.setup_regions:
- cls.client.delete_region(r['id'])
- super(RegionsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('56186092-82e4-43f2-b954-91013218ba42')
def test_create_update_get_delete_region(self):
# Create region
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index ec904e6..69cac33 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -32,6 +32,8 @@
for _ in range(3):
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
+ cls.addClassResourceCleanup(cls.roles_client.delete_role,
+ role['id'])
cls.roles.append(role)
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
@@ -42,29 +44,23 @@
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'),
domain_id=cls.domain['id'])['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.project['id'])
cls.group_body = cls.groups_client.create_group(
name=data_utils.rand_name('Group'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
+ cls.addClassResourceCleanup(cls.groups_client.delete_group,
+ cls.group_body['id'])
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=cls.u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user_body['id'])
cls.role = cls.roles_client.create_role(
name=data_utils.rand_name('Role'))['role']
-
- @classmethod
- def resource_cleanup(cls):
- cls.roles_client.delete_role(cls.role['id'])
- cls.groups_client.delete_group(cls.group_body['id'])
- cls.users_client.delete_user(cls.user_body['id'])
- cls.projects_client.delete_project(cls.project['id'])
- # NOTE(harika-vakadi): It is necessary to disable the domain
- # before deleting,or else it would result in unauthorized error
- cls.domains_client.update_domain(cls.domain['id'], enabled=False)
- cls.domains_client.delete_domain(cls.domain['id'])
- for role in cls.roles:
- cls.roles_client.delete_role(role['id'])
- super(RolesV3TestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.roles_client.delete_role,
+ cls.role['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
@@ -342,14 +338,13 @@
# domain role to a global one
self._create_implied_role(domain_role1['id'], self.role['id'])
- if CONF.identity_feature_enabled.forbid_global_implied_dsr:
- # The contrary is not true: we can't create an inference rule
- # from a global role to a domain role
- self.assertRaises(
- lib_exc.Forbidden,
- self.roles_client.create_role_inference_rule,
- self.role['id'],
- domain_role1['id'])
+ # The contrary is not true: we can't create an inference rule
+ # from a global role to a domain role
+ self.assertRaises(
+ lib_exc.Forbidden,
+ self.roles_client.create_role_inference_rule,
+ self.role['id'],
+ domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
def test_assignments_for_domain_roles(self):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 6343ea8..0845407 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -26,6 +26,8 @@
class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
+ credentials = ['primary', 'admin', 'alt']
+
@decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
def test_tokens(self):
# Valid user's token is authenticated
@@ -163,12 +165,78 @@
# Get available project scopes
available_projects = self.client.list_auth_projects()['projects']
- # create list to save fetched project's id
+ # Create list to save fetched project IDs
fetched_project_ids = [i['id'] for i in available_projects]
# verifying the project ids in list
missing_project_ids = \
[p for p in assigned_project_ids if p not in fetched_project_ids]
self.assertEmpty(missing_project_ids,
- "Failed to find project_id %s in fetched list" %
+ "Failed to find project_ids %s in fetched list" %
', '.join(missing_project_ids))
+
+ @decorators.idempotent_id('ec5ecb05-af64-4c04-ac86-4d9f6f12f185')
+ def test_get_available_domain_scopes(self):
+ # Test that listing domain scopes for a user works if the user has
+ # a domain role or belongs to a group that has a domain role. The
+ # admin client is used to add roles to the alt user, who performs
+ # the API calls, to avoid 401 Unauthorized errors.
+ alt_user_id = self.os_alt.credentials.user_id
+
+ def _create_user_domain_role_for_alt_user():
+ domain_id = self.setup_test_domain()['id']
+ role_id = self.setup_test_role()['id']
+
+ # Create a role association between the user and domain.
+ self.roles_client.create_user_role_on_domain(
+ domain_id, alt_user_id, role_id)
+ self.addCleanup(
+ self.roles_client.delete_role_from_user_on_domain,
+ domain_id, alt_user_id, role_id)
+
+ return domain_id
+
+ def _create_group_domain_role_for_alt_user():
+ domain_id = self.setup_test_domain()['id']
+ role_id = self.setup_test_role()['id']
+
+ # Create a group.
+ group_name = data_utils.rand_name('Group')
+ group_id = self.groups_client.create_group(
+ name=group_name, domain_id=domain_id)['group']['id']
+ self.addCleanup(self.groups_client.delete_group, group_id)
+
+ # Add the alt user to the group.
+ self.groups_client.add_group_user(group_id, alt_user_id)
+ self.addCleanup(self.groups_client.delete_group_user,
+ group_id, alt_user_id)
+
+ # Create a role association between the group and domain.
+ self.roles_client.create_group_role_on_domain(
+ domain_id, group_id, role_id)
+ self.addCleanup(
+ self.roles_client.delete_role_from_group_on_domain,
+ domain_id, group_id, role_id)
+
+ return domain_id
+
+ # Give the alt user two direct domain role assignments and two more
+ # via group membership, each on a freshly created domain and role.
+ assigned_domain_ids = []
+ for _ in range(2):
+ domain_id = _create_user_domain_role_for_alt_user()
+ assigned_domain_ids.append(domain_id)
+ domain_id = _create_group_domain_role_for_alt_user()
+ assigned_domain_ids.append(domain_id)
+
+ # Get available domain scopes for the alt user.
+ available_domains = self.os_alt.identity_v3_client.list_auth_domains()[
+ 'domains']
+ fetched_domain_ids = [i['id'] for i in available_domains]
+
+ # Verify the expected domain IDs are in the list.
+ missing_domain_ids = \
+ [p for p in assigned_domain_ids if p not in fetched_domain_ids]
+ self.assertEmpty(missing_domain_ids,
+ "Failed to find domain_ids %s in fetched list"
+ % ", ".join(missing_domain_ids))
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 30d2a36..9edccbb 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -249,13 +249,16 @@
if 'description' not in kwargs:
kwargs['description'] = data_utils.rand_name('desc')
domain = cls.domains_client.create_domain(**kwargs)['domain']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_domain, domain['id'])
return domain
- def delete_domain(self, domain_id):
+ @classmethod
+ def delete_domain(cls, domain_id):
# NOTE(mpavlase) It is necessary to disable the domain before deleting
# otherwise it raises Forbidden exception
- self.domains_client.update_domain(domain_id, enabled=False)
- self.domains_client.delete_domain(domain_id)
+ cls.domains_client.update_domain(domain_id, enabled=False)
+ cls.domains_client.delete_domain(domain_id)
def setup_test_user(self, password=None):
"""Set up a test user."""
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 70ba2fe..4911ba4 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -46,16 +46,6 @@
cls.created_images = []
@classmethod
- def resource_cleanup(cls):
- for image_id in cls.created_images:
- test_utils.call_and_ignore_notfound_exc(
- cls.client.delete_image, image_id)
-
- for image_id in cls.created_images:
- cls.client.wait_for_resource_deletion(image_id)
- super(BaseImageTest, cls).resource_cleanup()
-
- @classmethod
def create_image(cls, data=None, **kwargs):
"""Wrapper that returns a test image."""
@@ -75,6 +65,10 @@
if 'image' in image:
image = image['image']
cls.created_images.append(image['id'])
+ cls.addClassResourceCleanup(cls.client.wait_for_resource_deletion,
+ image['id'])
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.client.delete_image, image['id'])
return image
@classmethod
@@ -148,16 +142,17 @@
cls.schemas_client = cls.os_primary.schemas_client
cls.versions_client = cls.os_primary.image_versions_client
- def create_namespace(cls, namespace_name=None, visibility='public',
+ def create_namespace(self, namespace_name=None, visibility='public',
description='Tempest', protected=False,
**kwargs):
if not namespace_name:
namespace_name = data_utils.rand_name('test-ns')
kwargs.setdefault('display_name', namespace_name)
- namespace = cls.namespaces_client.create_namespace(
+ namespace = self.namespaces_client.create_namespace(
namespace=namespace_name, visibility=visibility,
description=description, protected=protected, **kwargs)
- cls.addCleanup(cls.namespaces_client.delete_namespace, namespace_name)
+ self.addCleanup(self.namespaces_client.delete_namespace,
+ namespace_name)
return namespace
@@ -189,23 +184,3 @@
disk_format='raw')
self.addCleanup(self.client.delete_image, image['id'])
return image['id']
-
-
-class BaseV1ImageAdminTest(BaseImageTest):
- credentials = ['admin', 'primary']
-
- @classmethod
- def setup_clients(cls):
- super(BaseV1ImageAdminTest, cls).setup_clients()
- cls.client = cls.os_primary.image_client
- cls.admin_client = cls.os_admin.image_client
-
-
-class BaseV2ImageAdminTest(BaseImageTest):
- credentials = ['admin', 'primary']
-
- @classmethod
- def setup_clients(cls):
- super(BaseV2ImageAdminTest, cls).setup_clients()
- cls.client = cls.os_primary.image_client_v2
- cls.admin_client = cls.os_admin.image_client_v2
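The image cleanups above register the wait before the delete on purpose: class resource cleanups run in reverse registration order (the usual last-in, first-out convention of addCleanup-style hooks), so at teardown the delete fires first and the wait for deletion runs afterwards. A small generic sketch of that ordering with plain callables (not the Tempest implementation):

    # Cleanup callbacks are stored and executed in LIFO order.
    cleanups = []

    def add_class_resource_cleanup(fn, *args):
        cleanups.append((fn, args))

    def run_cleanups():
        while cleanups:
            fn, args = cleanups.pop()   # last registered runs first
            fn(*args)

    add_class_resource_cleanup(print, 'wait for image deletion')
    add_class_resource_cleanup(print, 'delete image')
    run_cleanups()
    # prints "delete image" first, then "wait for image deletion"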
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index c846f88..ce5bd3e 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -18,8 +18,6 @@
import six
-import testtools
-
from oslo_log import log as logging
from tempest.api.image import base
from tempest import config
@@ -128,8 +126,6 @@
self.assertEqual(image['id'], body['id'])
self.assertEqual(new_image_name, body['name'])
- @testtools.skipUnless(CONF.image_feature_enabled.deactivate_image,
- 'deactivate-image is not available.')
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
# Create image
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py b/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
index 69bebfe..482e808 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
@@ -49,7 +49,7 @@
# List namespace tags
body = self.namespace_tags_client.list_namespace_tags(
namespace['namespace'])
- self.assertTrue(3, len(body['tags']))
+ self.assertEqual(3, len(body['tags']))
self.assertIn(body['tags'][0]['name'], self.tag_list)
self.assertIn(body['tags'][1]['name'], self.tag_list)
self.assertIn(body['tags'][2]['name'], self.tag_list)
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index 4d41e33..49a9cdb 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -130,5 +130,3 @@
subnet_list = self.admin_subnets_client.list_subnets()
self.assertNotIn(subnet['id'],
(s['id'] for s in subnet_list))
- # Removes subnet from the cleanup list
- self.subnets.remove(subnet)
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index 1a7b0ec..206d867 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -51,7 +51,8 @@
agents = cls.admin_agents_client.list_agents(
agent_type=AGENT_TYPE)['agents']
for agent in agents:
- if agent['configurations']['agent_mode'] in AGENT_MODES:
+ if (agent['configurations']['agent_mode'] in AGENT_MODES and
+ agent['alive']):
cls.agent = agent
break
else:
diff --git a/tempest/api/network/admin/test_metering_extensions.py b/tempest/api/network/admin/test_metering_extensions.py
index fd86782..5063fef 100644
--- a/tempest/api/network/admin/test_metering_extensions.py
+++ b/tempest/api/network/admin/test_metering_extensions.py
@@ -15,6 +15,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -52,7 +53,10 @@
description=description,
name=name)
metering_label = body['metering_label']
- cls.metering_labels.append(metering_label)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.admin_metering_labels_client.delete_metering_label,
+ metering_label['id'])
return metering_label
@classmethod
@@ -64,7 +68,9 @@
remote_ip_prefix=remote_ip_prefix, direction=direction,
metering_label_id=metering_label_id)
metering_label_rule = body['metering_label_rule']
- cls.metering_label_rules.append(metering_label_rule)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ client.delete_metering_label_rule, metering_label_rule['id'])
return metering_label_rule
def _delete_metering_label(self, metering_label_id):
diff --git a/tempest/api/network/admin/test_ports.py b/tempest/api/network/admin/test_ports.py
index 807994b..483b405 100644
--- a/tempest/api/network/admin/test_ports.py
+++ b/tempest/api/network/admin/test_ports.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import socket
-
from tempest.api.network import base
from tempest import config
from tempest.lib import decorators
@@ -25,10 +23,16 @@
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
@classmethod
+ def setup_clients(cls):
+ super(PortsAdminExtendedAttrsTestJSON, cls).setup_clients()
+ cls.hyper_client = cls.os_admin.hypervisor_client
+
+ @classmethod
def resource_setup(cls):
super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
cls.network = cls.create_network()
- cls.host_id = socket.gethostname()
+ hyper_list = cls.hyper_client.list_hypervisors()
+ cls.host_id = hyper_list['hypervisors'][0]['hypervisor_hostname']
@decorators.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
def test_create_port_binding_ext_attr(self):
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index cf4236d..b1e4a58 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.network import base
from tempest.common import identity
from tempest.common import utils
@@ -80,8 +82,24 @@
non_default_quotas = self.admin_quotas_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(project_id, q['tenant_id'])
+ quota_set = self.admin_quotas_client.show_quotas(project_id)['quota']
+ default_quotas = self.admin_quotas_client.show_default_quotas(
+ project_id)['quota']
+ self.assertEqual(default_quotas, quota_set)
@decorators.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
def test_quotas(self):
new_quotas = {'network': 0, 'port': 0}
self._check_quotas(new_quotas)
+
+ @testtools.skipUnless(utils.is_extension_enabled(
+ 'quota_details', 'network'), 'Quota details extension not enabled.')
+ @decorators.idempotent_id('7b05ec5f-bf44-43cb-b28f-ddd72a824288')
+ def test_show_quota_details(self):
+ # Show quota details for an existing project
+ quota_details = self.admin_quotas_client.show_quota_details(
+ self.admin_quotas_client.tenant_id)['quota']
+ expected_keys = ['used', 'limit', 'reserved']
+ for resource_type in quota_details:
+ for key in expected_keys:
+ self.assertIn(key, quota_details[resource_type])
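test_show_quota_details only asserts that every resource entry in the response carries the used/limit/reserved keys. A tiny illustration of the expected payload shape and the check (the values are made up):

    quota_details = {                      # example response body only
        'network': {'used': 1, 'limit': 10, 'reserved': 0},
        'port': {'used': 3, 'limit': 50, 'reserved': 0},
    }
    expected_keys = ['used', 'limit', 'reserved']
    for resource_type, detail in quota_details.items():
        for key in expected_keys:
            assert key in detail, '%s is missing %s' % (resource_type, key)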
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 8308e34..8670165 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -88,13 +88,9 @@
@classmethod
def resource_setup(cls):
super(BaseNetworkTest, cls).resource_setup()
- cls.networks = []
cls.subnets = []
cls.ports = []
cls.routers = []
- cls.floating_ips = []
- cls.metering_labels = []
- cls.metering_label_rules = []
cls.ethertype = "IPv" + str(cls._ip_version)
if cls._ip_version == 4:
cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
@@ -104,46 +100,6 @@
cls.mask_bits = CONF.network.project_network_v6_mask_bits
@classmethod
- def resource_cleanup(cls):
- if CONF.service_available.neutron:
- # Clean up floating IPs
- for floating_ip in cls.floating_ips:
- test_utils.call_and_ignore_notfound_exc(
- cls.floating_ips_client.delete_floatingip,
- floating_ip['id'])
-
- # Clean up metering label rules
- # Not all classes in the hierarchy have the client class variable
- if cls.metering_label_rules:
- label_rules_client = cls.admin_metering_label_rules_client
- for metering_label_rule in cls.metering_label_rules:
- test_utils.call_and_ignore_notfound_exc(
- label_rules_client.delete_metering_label_rule,
- metering_label_rule['id'])
- # Clean up metering labels
- for metering_label in cls.metering_labels:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_metering_labels_client.delete_metering_label,
- metering_label['id'])
- # Clean up ports
- for port in cls.ports:
- test_utils.call_and_ignore_notfound_exc(
- cls.ports_client.delete_port, port['id'])
- # Clean up routers
- for router in cls.routers:
- test_utils.call_and_ignore_notfound_exc(
- cls.delete_router, router)
- # Clean up subnets
- for subnet in cls.subnets:
- test_utils.call_and_ignore_notfound_exc(
- cls.subnets_client.delete_subnet, subnet['id'])
- # Clean up networks
- for network in cls.networks:
- test_utils.call_and_ignore_notfound_exc(
- cls.networks_client.delete_network, network['id'])
- super(BaseNetworkTest, cls).resource_cleanup()
-
- @classmethod
def create_network(cls, network_name=None, **kwargs):
"""Wrapper utility that returns a test network."""
network_name = network_name or data_utils.rand_name(
@@ -151,7 +107,9 @@
body = cls.networks_client.create_network(name=network_name, **kwargs)
network = body['network']
- cls.networks.append(network)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.networks_client.delete_network,
+ network['id'])
return network
@classmethod
@@ -194,6 +152,9 @@
message = 'Available CIDR for subnet creation could not be found'
raise exceptions.BuildErrorException(message)
subnet = body['subnet']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.subnets_client.delete_subnet,
+ subnet['id'])
cls.subnets.append(subnet)
return subnet
@@ -203,6 +164,8 @@
body = cls.ports_client.create_port(network_id=network['id'],
**kwargs)
port = body['port']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.ports_client.delete_port, port['id'])
cls.ports.append(port)
return port
@@ -229,6 +192,8 @@
name=router_name, external_gateway_info=ext_gw_info,
admin_state_up=admin_state_up, **kwargs)
router = body['router']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_router, router)
cls.routers.append(router)
return router
@@ -238,7 +203,9 @@
body = cls.floating_ips_client.create_floatingip(
floating_network_id=external_network_id)
fip = body['floatingip']
- cls.floating_ips.append(fip)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.floating_ips_client.delete_floatingip,
+ fip['id'])
return fip
@classmethod
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 1c59556..7345fd1 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -104,15 +104,6 @@
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
expected, exclude_keys))
- def _delete_network(self, network):
- # Deleting network also deletes its subnets if exists
- self.networks_client.delete_network(network['id'])
- if network in self.networks:
- self.networks.remove(network)
- for subnet in self.subnets:
- if subnet['network_id'] == network['id']:
- self.subnets.remove(subnet)
-
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
**kwargs):
network = self.create_network()
@@ -132,8 +123,6 @@
self._compare_resource_attrs(subnet, compare_args)
self.networks_client.delete_network(net_id)
- self.networks.pop()
- self.subnets.pop()
class NetworksTest(BaseNetworkTestResources):
@@ -171,7 +160,7 @@
def test_create_update_delete_network_subnet(self):
# Create a network
network = self.create_network()
- self.addCleanup(self._delete_network, network)
+ self.addCleanup(self.networks_client.delete_network, network['id'])
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
@@ -280,7 +269,7 @@
network = self.create_network()
net_id = network['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_network, network)
+ self.networks_client.delete_network, network['id'])
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
@@ -324,7 +313,7 @@
@decorators.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
- self.addCleanup(self._delete_network, network)
+ self.addCleanup(self.networks_client.delete_network, network['id'])
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
@@ -622,7 +611,6 @@
port = self.create_port(slaac_network)
self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
self.subnets_client.delete_subnet(subnet_slaac['id'])
- self.subnets.pop()
subnets = self.subnets_client.list_subnets()
subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
self.assertNotIn(subnet_slaac['id'], subnet_ids,
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index eb53fbb..5168423 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ipaddress
+
import netaddr
+import six
import testtools
from tempest.api.network import base_security_groups as sec_base
@@ -178,6 +181,83 @@
self.assertIn(port_1_fixed_ip, port_ips)
self.assertIn(network['id'], port_net_ids)
+ @decorators.idempotent_id('79895408-85d5-460d-94e7-9531c5fd9123')
+ @testtools.skipUnless(
+ utils.is_extension_enabled('ip-substring-filtering', 'network'),
+ 'ip-substring-filtering extension not enabled.')
+ def test_port_list_filter_by_ip_substr(self):
+ # Create network and subnet
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+
+ # Get two IP addresses
+ ip_address_1 = None
+ ip_address_2 = None
+ ip_network = ipaddress.ip_network(six.text_type(subnet['cidr']))
+ for ip in ip_network:
+ if ip == ip_network.network_address:
+ continue
+ if ip_address_1 is None:
+ ip_address_1 = six.text_type(ip)
+ else:
+ ip_address_2 = ip_address_1
+ ip_address_1 = six.text_type(ip)
+ # Make sure the two addresses differ in more than just the last character
+ if ip_address_1[:-1] != ip_address_2[:-1]:
+ break
+
+ # Create two ports
+ fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_1}]
+ port_1 = self.ports_client.create_port(network_id=network['id'],
+ fixed_ips=fixed_ips)
+ self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
+ fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_2}]
+ port_2 = self.ports_client.create_port(network_id=network['id'],
+ fixed_ips=fixed_ips)
+ self.addCleanup(self.ports_client.delete_port, port_2['port']['id'])
+
+ # Scenario 1: List port1 (port2 is filtered out)
+ if ip_address_1[:-1] != ip_address_2[:-1]:
+ ips_filter = 'ip_address_substr=' + ip_address_1[:-1]
+ else:
+ ips_filter = 'ip_address_substr=' + ip_address_1
+ ports = self.ports_client.list_ports(fixed_ips=ips_filter)['ports']
+ # Check that we got the desired port
+ port_ids = [port['id'] for port in ports]
+ fixed_ips = [port['fixed_ips'] for port in ports]
+ port_ips = []
+ for addr in fixed_ips:
+ port_ips.extend([a['ip_address'] for a in addr])
+
+ port_net_ids = [port['network_id'] for port in ports]
+ self.assertIn(network['id'], port_net_ids)
+ self.assertIn(port_1['port']['id'], port_ids)
+ self.assertIn(port_1['port']['fixed_ips'][0]['ip_address'], port_ips)
+ self.assertNotIn(port_2['port']['id'], port_ids)
+ self.assertNotIn(
+ port_2['port']['fixed_ips'][0]['ip_address'], port_ips)
+
+ # Scenario 2: List both port1 and port2
+ substr = ip_address_1
+ while substr not in ip_address_2:
+ substr = substr[:-1]
+ ips_filter = 'ip_address_substr=' + substr
+ ports = self.ports_client.list_ports(fixed_ips=ips_filter)['ports']
+ # Check that we got both ports
+ port_ids = [port['id'] for port in ports]
+ fixed_ips = [port['fixed_ips'] for port in ports]
+ port_ips = []
+ for addr in fixed_ips:
+ port_ips.extend([a['ip_address'] for a in addr])
+
+ port_net_ids = [port['network_id'] for port in ports]
+ self.assertIn(network['id'], port_net_ids)
+ self.assertIn(port_1['port']['id'], port_ids)
+ self.assertIn(port_1['port']['fixed_ips'][0]['ip_address'], port_ips)
+ self.assertIn(port_2['port']['id'], port_ids)
+ self.assertIn(port_2['port']['fixed_ips'][0]['ip_address'], port_ips)
+
@decorators.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
# Create a router
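Scenario 2 of the new substring-filter test trims ip_address_1 from the right until the remainder is also contained in ip_address_2, yielding a substring shared by both addresses. A small stand-alone sketch of that loop with two made-up addresses:

    ip_address_1 = '10.100.0.5'    # example values only
    ip_address_2 = '10.100.0.14'

    substr = ip_address_1
    while substr not in ip_address_2:
        substr = substr[:-1]       # drop the last character and retry

    print(substr)                  # '10.100.0.' -- contained in both addresses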
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 99ffaa8..abbb779 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -65,9 +65,12 @@
'The public_network_id option must be specified.')
def test_create_show_list_update_delete_router(self):
# Create a router
+ name = data_utils.rand_name(self.__class__.__name__ + '-router')
router = self._create_router(
+ name=name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
+ self.assertEqual(router['name'], name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
router['external_gateway_info']['network_id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index c9ce55c..ddd7d3a 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -84,6 +84,8 @@
def test_router_remove_interface_in_use_returns_409(self):
self.routers_client.add_router_interface(self.router['id'],
subnet_id=self.subnet['id'])
+ self.addCleanup(self.routers_client.remove_router_interface,
+ self.router['id'], subnet_id=self.subnet['id'])
self.assertRaises(lib_exc.Conflict,
self.routers_client.delete_router,
self.router['id'])
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 97ccee9..24bd8ea 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -23,7 +23,6 @@
class SecGroupTest(base.BaseSecGroupTest):
- _project_network_cidr = CONF.network.project_network_cidr
@classmethod
def skip_checks(cls):
@@ -209,7 +208,7 @@
protocol = 'tcp'
port_range_min = 76
port_range_max = 77
- ip_prefix = self._project_network_cidr
+ ip_prefix = str(self.cidr)
self._create_verify_security_group_rule(sg_id, direction,
self.ethertype, protocol,
port_range_min,
@@ -238,4 +237,3 @@
class SecGroupIPv6Test(SecGroupTest):
_ip_version = 6
- _project_network_cidr = CONF.network.project_network_v6_cidr
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 435673b..d054865 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -24,7 +24,6 @@
class NegativeSecGroupTest(base.BaseSecGroupTest):
- _project_network_cidr = CONF.network.project_network_cidr
@classmethod
def skip_checks(cls):
@@ -110,7 +109,7 @@
sg2_body, _ = self._create_security_group()
# Create rule specifying both remote_ip_prefix and remote_group_id
- prefix = self._project_network_cidr
+ prefix = str(self.cidr)
self.assertRaises(
lib_exc.BadRequest,
self.security_group_rules_client.create_security_group_rule,
@@ -225,7 +224,6 @@
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
_ip_version = 6
- _project_network_cidr = CONF.network.project_network_v6_cidr
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7607439c-af73-499e-bf64-f687fd12a842')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index 409d556..85f6896 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -131,11 +131,8 @@
prefix = CONF.network.default_network
cls.subnetpool = cls.subnetpools_client.create_subnetpool(
name=subnetpool_name, prefixes=prefix)['subnetpool']
-
- @classmethod
- def resource_cleanup(cls):
- cls.subnetpools_client.delete_subnetpool(cls.subnetpool['id'])
- super(TagsExtTest, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.subnetpools_client.delete_subnetpool,
+ cls.subnetpool['id'])
def _create_tags_for_each_resource(self):
# Create a tag for each resource in `SUPPORTED_RESOURCES` and return
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 24c9c24..e8f3f8b 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -36,10 +36,14 @@
using HA proxy sync the deletion properly, otherwise, the container
might fail to be deleted because it's not empty.
- :param containers: List of containers to be deleted
+ :param containers: List of container names (or a single container
+ name as a string) to be deleted
:param container_client: Client to be used to delete containers
:param object_client: Client to be used to delete objects
"""
+ if isinstance(containers, str):
+ containers = [containers]
+
for cont in containers:
try:
params = {'limit': 9999, 'format': 'json'}
@@ -106,7 +110,7 @@
def create_container(cls):
# wrapper that returns a test container
container_name = data_utils.rand_name(name='TestContainer')
- cls.container_client.create_container(container_name)
+ cls.container_client.update_container(container_name)
cls.containers.append(container_name)
return container_name
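With the isinstance check added above, delete_containers now accepts either a list of names or a single name. A minimal illustration of that normalization pattern and the two call forms it enables (client plumbing stubbed out, names are placeholders):

    def delete_containers(containers):
        # Accept either a single container name or a list of names.
        if isinstance(containers, str):
            containers = [containers]
        for name in containers:
            print('deleting', name)   # the real helper empties and deletes each one

    delete_containers('cont-a')                  # single name as a string
    delete_containers(['cont-b', 'cont-c'])      # list of names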
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 0f86540..c5c30e3 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -43,15 +43,14 @@
super(AccountTest, cls).resource_setup()
for i in range(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % six.int2byte(i))
- cls.container_client.create_container(name)
+ cls.container_client.update_container(name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ [name],
+ cls.container_client,
+ cls.object_client)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(AccountTest, cls).resource_cleanup()
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
def test_list_containers(self):
@@ -242,7 +241,7 @@
@decorators.idempotent_id('365e6fc7-1cfe-463b-a37c-8bd08d47b6aa')
def test_list_containers_with_prefix(self):
# list containers that have a name that starts with a prefix
- prefix = '{0}-a'.format(CONF.resources_prefix)
+ prefix = 'tempest-a'
params = {'prefix': prefix}
resp, container_list = self.account_client.list_account_containers(
params=params)
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 03a5879..90b24b4 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -39,7 +39,7 @@
def setUp(self):
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(self.container_name)
+ self.container_client.update_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index c6f21ec..cdc420e 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -27,7 +27,7 @@
@decorators.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
def test_create_container(self):
container_name = data_utils.rand_name(name='TestContainer')
- resp, _ = self.container_client.create_container(container_name)
+ resp, _ = self.container_client.update_container(container_name)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -35,20 +35,20 @@
def test_create_container_overwrite(self):
# overwrite container with the same name
container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(container_name)
+ self.container_client.update_container(container_name)
self.containers.append(container_name)
- resp, _ = self.container_client.create_container(container_name)
+ resp, _ = self.container_client.update_container(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@decorators.idempotent_id('c2ac4d59-d0f5-40d5-ba19-0635056d48cd')
def test_create_container_with_metadata_key(self):
# create container with the blank value of metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta': ''}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Container-Meta-test-container-meta': ''}
+ resp, _ = self.container_client.update_container(
container_name,
- metadata=metadata)
+ **headers)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -64,10 +64,10 @@
container_name = data_utils.rand_name(name='TestContainer')
# metadata name using underscores should be converted to hyphens
- metadata = {'test_container_meta': 'Meta1'}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Container-Meta-test_container_meta': 'Meta1'}
+ resp, _ = self.container_client.update_container(
container_name,
- metadata=metadata)
+ **headers)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@@ -75,22 +75,20 @@
container_name)
self.assertIn('x-container-meta-test-container-meta', resp)
self.assertEqual(resp['x-container-meta-test-container-meta'],
- metadata['test_container_meta'])
+ headers['X-Container-Meta-test_container_meta'])
@decorators.idempotent_id('24d16451-1c0c-4e4f-b59c-9840a3aba40e')
def test_create_container_with_remove_metadata_key(self):
# create container with the blank value of remove metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata_1 = {'test-container-meta': 'Meta1'}
- self.container_client.create_container(
- container_name,
- metadata=metadata_1)
+ headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
- metadata_2 = {'test-container-meta': ''}
- resp, _ = self.container_client.create_container(
+ headers = {'X-Remove-Container-Meta-test-container-meta': ''}
+ resp, _ = self.container_client.update_container(
container_name,
- remove_metadata=metadata_2)
+ **headers)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
@@ -101,14 +99,13 @@
def test_create_container_with_remove_metadata_value(self):
# create container with remove metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
-
- resp, _ = self.container_client.create_container(
+ headers = {'X-Remove-Container-Meta-test-container-meta': 'Meta1'}
+ resp, _ = self.container_client.update_container(
container_name,
- remove_metadata=metadata)
+ **headers)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
@@ -301,9 +298,8 @@
def test_update_container_metadata_with_create_and_delete_metadata(self):
# Send one request of adding and deleting metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata_1 = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata_1)
+ metadata_1 = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **metadata_1)
self.containers.append(container_name)
metadata_2 = {'test-container-meta2': 'Meta2'}
@@ -311,7 +307,7 @@
self.container_client.create_update_or_delete_container_metadata(
container_name,
create_update_metadata=metadata_2,
- delete_metadata=metadata_1))
+ delete_metadata={'test-container-meta1': 'Meta1'}))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -343,15 +339,14 @@
def test_update_container_metadata_with_delete_metadata(self):
# update container metadata using delete metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ metadata = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **metadata)
self.containers.append(container_name)
resp, _ = (
self.container_client.create_update_or_delete_container_metadata(
container_name,
- delete_metadata=metadata))
+ delete_metadata={'test-container-meta1': 'Meta1'}))
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
@@ -378,9 +373,8 @@
def test_update_container_metadata_with_delete_metadata_key(self):
# update container metadata with a blank value of metadata
container_name = data_utils.rand_name(name='TestContainer')
- metadata = {'test-container-meta1': 'Meta1'}
- self.container_client.create_container(container_name,
- metadata=metadata)
+ headers = {'X-Container-Meta-test-container-meta1': 'Meta1'}
+ self.container_client.update_container(container_name, **headers)
self.containers.append(container_name)
metadata = {'test-container-meta1': ''}
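The container tests above move from create_container(metadata=...) to update_container(**headers), passing Swift metadata directly as X-Container-Meta-* headers (or X-Remove-Container-Meta-* to clear a key). A small helper sketch of that mapping, with the function name chosen here purely for illustration:

    def to_container_headers(metadata, remove=False):
        # Map a plain metadata dict onto the Swift header convention:
        # X-Container-Meta-<key> sets a value, X-Remove-Container-Meta-<key>
        # removes it.
        prefix = 'X-Remove-Container-Meta-' if remove else 'X-Container-Meta-'
        return {prefix + key: value for key, value in metadata.items()}

    print(to_container_headers({'test-container-meta': 'Meta1'}))
    # {'X-Container-Meta-test-container-meta': 'Meta1'}
    print(to_container_headers({'test-container-meta': ''}, remove=True))
    # {'X-Remove-Container-Meta-test-container-meta': ''}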
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index 707c016..b8c83b7 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -45,9 +45,10 @@
max_length = self.constraints['max_container_name_length']
# create a container with long name
container_name = data_utils.arbitrary_string(size=max_length + 1)
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name)
self.assertIn('Container name length of ' + str(max_length + 1) +
' longer than ' + str(max_length), str(ex))
@@ -61,11 +62,13 @@
# that is longer than max.
max_length = self.constraints['max_meta_name_length']
container_name = data_utils.rand_name(name='TestContainer')
- metadata_name = data_utils.arbitrary_string(size=max_length + 1)
+ metadata_name = 'X-Container-Meta-' + data_utils.arbitrary_string(
+ size=max_length + 1)
metadata = {metadata_name: 'penguin'}
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Metadata name too long', str(ex))
@decorators.attr(type=["negative"])
@@ -79,10 +82,11 @@
max_length = self.constraints['max_meta_value_length']
container_name = data_utils.rand_name(name='TestContainer')
metadata_value = data_utils.arbitrary_string(size=max_length + 1)
- metadata = {'animal': metadata_value}
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ metadata = {'X-Container-Meta-animal': metadata_value}
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Metadata value longer than ' + str(max_length), str(ex))
@decorators.attr(type=["negative"])
@@ -97,11 +101,12 @@
container_name = data_utils.rand_name(name='TestContainer')
metadata = {}
for i in range(max_count + 1):
- metadata['animal-' + str(i)] = 'penguin'
+ metadata['X-Container-Meta-animal-' + str(i)] = 'penguin'
- ex = self.assertRaises(exceptions.BadRequest,
- self.container_client.create_container,
- container_name, metadata=metadata)
+ ex = self.assertRaises(
+ exceptions.BadRequest,
+ self.container_client.update_container,
+ container_name, **metadata)
self.assertIn('Too many metadata items; max ' + str(max_count),
str(ex))
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 042d288..322579c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -33,8 +33,6 @@
class ContainerSyncTest(base.BaseObjectTest):
- clients = {}
-
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@@ -54,6 +52,7 @@
super(ContainerSyncTest, cls).resource_setup()
cls.containers = []
cls.objects = []
+ cls.clients = {}
# Default container-server config only allows localhost
cls.local_ip = '127.0.0.1'
@@ -72,14 +71,12 @@
(cls.container_client_alt, cls.object_client_alt)
for cont_name, client in cls.clients.items():
client[0].create_container(cont_name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ cont_name,
+ client[0],
+ client[1])
cls.containers.append(cont_name)
- @classmethod
- def resource_cleanup(cls):
- for client in cls.clients.values():
- cls.delete_containers(client[0], client[1])
- super(ContainerSyncTest, cls).resource_cleanup()
-
def _test_container_synchronization(self, make_headers):
# container to container synchronization
# to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index ed1be90..86f7c8c 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -40,10 +40,10 @@
def _test_object_expiry(self, metadata):
# update object metadata
resp, _ = \
- self.object_client.update_object_metadata(self.container_name,
- self.object_name,
- metadata,
- metadata_prefix='')
+ self.object_client.create_or_update_object_metadata(
+ self.container_name,
+ self.object_name,
+ headers=metadata)
# verify object metadata
resp, _ = \
self.object_client.list_object_metadata(self.container_name,
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 836a875..acb578d 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -48,8 +48,9 @@
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
- self.object_client.create_object_segments(
- self.container_name, object_name, i, data_segments[i])
+ obj_name = "%s/%s" % (object_name, i)
+ self.object_client.create_object(
+ self.container_name, obj_name, data_segments[i])
return object_name, data_segments
@@ -184,12 +185,15 @@
# create object with transfer_encoding
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(1024)
- _, _, resp_headers = self.object_client.put_object_with_chunk(
- container=self.container_name,
- name=object_name,
- contents=data_utils.chunkify(data, 512)
- )
- self.assertHeaders(resp_headers, 'Object', 'PUT')
+ headers = {'Transfer-Encoding': 'chunked'}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data=data_utils.chunkify(data, 512),
+ headers=headers,
+ chunked=True)
+
+ self.assertHeaders(resp, 'Object', 'PUT')
# check uploaded content
_, body = self.object_client.get_object(self.container_name,
@@ -325,11 +329,10 @@
object_name, _ = self.create_object(self.container_name)
metadata = {'X-Object-Meta-test-meta': 'Meta'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- metadata,
- metadata_prefix='')
+ headers=metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -350,11 +353,10 @@
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -375,11 +377,10 @@
update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
'X-Remove-Object-Meta-test-meta1': 'Meta1'}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -403,11 +404,10 @@
metadata=None)
object_prefix = '%s/%s' % (self.container_name, object_name)
update_metadata = {'X-Object-Manifest': object_prefix}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -422,11 +422,10 @@
object_name, _ = self.create_object(self.container_name)
update_metadata = {'X-Object-Meta-test-meta': ''}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -447,11 +446,10 @@
metadata=create_metadata)
update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
- resp, _ = self.object_client.update_object_metadata(
+ resp, _ = self.object_client.create_or_update_object_metadata(
self.container_name,
object_name,
- update_metadata,
- metadata_prefix='')
+ headers=update_metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -728,8 +726,13 @@
dst_object_name,
dst_data)
# copy source object to destination
- resp, _ = self.object_client.copy_object_in_same_container(
- self.container_name, src_object_name, dst_object_name)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
+ str(src_object_name))
+ resp, body = self.object_client.create_object(self.container_name,
+ dst_object_name,
+ data=None,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check data
@@ -749,8 +752,14 @@
# change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
- resp, _ = self.object_client.copy_object_in_same_container(
- self.container_name, object_name, object_name, metadata)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
+ str(object_name))
+ resp, body = self.object_client.create_object(self.container_name,
+ object_name,
+ data=None,
+ metadata=metadata,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check the content type
@@ -786,12 +795,12 @@
def test_copy_object_across_containers(self):
# create a container to use as a source container
src_container_name = data_utils.rand_name(name='TestSourceContainer')
- self.container_client.create_container(src_container_name)
+ self.container_client.update_container(src_container_name)
self.containers.append(src_container_name)
# create a container to use as a destination container
dst_container_name = data_utils.rand_name(
name='TestDestinationContainer')
- self.container_client.create_container(dst_container_name)
+ self.container_client.update_container(dst_container_name)
self.containers.append(dst_container_name)
# create object in source container
object_name = data_utils.rand_name(name='Object')
@@ -801,16 +810,21 @@
# set object metadata
meta_key = data_utils.rand_name(name='test')
meta_value = data_utils.rand_name(name='MetaValue')
- orig_metadata = {meta_key: meta_value}
- resp, _ = self.object_client.update_object_metadata(src_container_name,
- object_name,
- orig_metadata)
+ orig_metadata = {'X-Object-Meta-' + meta_key: meta_value}
+ resp, _ = self.object_client.create_or_update_object_metadata(
+ src_container_name,
+ object_name,
+ headers=orig_metadata)
self.assertHeaders(resp, 'Object', 'POST')
# copy object from source container to destination container
- resp, _ = self.object_client.copy_object_across_containers(
- src_container_name, object_name, dst_container_name,
- object_name)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(src_container_name),
+ str(object_name))
+ resp, body = self.object_client.create_object(dst_container_name,
+ object_name,
+ data=None,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check if object is present in destination container
@@ -897,8 +911,9 @@
data_segments = [data + str(i) for i in range(segments)]
# uploading segments
for i in range(segments):
- resp, _ = self.object_client.create_object_segments(
- self.container_name, object_name, i, data_segments[i])
+ obj_name = "%s/%s" % (object_name, i)
+ resp, _ = self.object_client.create_object(
+ self.container_name, obj_name, data_segments[i])
# creating a manifest file
metadata = {'X-Object-Manifest': '%s/%s/'
% (self.container_name, object_name)}
@@ -906,8 +921,8 @@
object_name, data='')
self.assertHeaders(resp, 'Object', 'PUT')
- resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name, metadata, metadata_prefix='')
+ resp, _ = self.object_client.create_or_update_object_metadata(
+ self.container_name, object_name, headers=metadata)
self.assertHeaders(resp, 'Object', 'POST')
resp, _ = self.object_client.list_object_metadata(
@@ -977,7 +992,7 @@
def setUp(self):
super(PublicObjectTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
- self.container_client.create_container(self.container_name)
+ self.container_client.update_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
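The copy tests above replace the removed copy_object_* helpers with a plain object PUT carrying an X-Copy-From header, following Swift's copy-via-PUT convention. A hedged sketch of how such a request is shaped, using python-requests only to build (not send) it; the endpoint, token and names are placeholders, not values from the patch:

    import requests   # sketch only; Tempest uses its own HTTP client

    storage_url = 'http://swift.example.com/v1/AUTH_test'   # placeholder
    headers = {
        'X-Auth-Token': 'gAAAA...',                          # placeholder
        # Source is given as "<container>/<object>"; an empty body tells
        # Swift to copy that object into the PUT target.
        'X-Copy-From': '%s/%s' % ('TestSourceContainer', 'Object-1'),
    }
    req = requests.Request(
        'PUT',
        '%s/%s/%s' % (storage_url, 'TestDestinationContainer', 'Object-1'),
        headers=headers, data=b'').prepare()
    print(req.method, req.url)
    print(req.headers['X-Copy-From'])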
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 4799053..75111b6 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -24,16 +24,6 @@
class ContainerTest(base.BaseObjectTest):
- @classmethod
- def resource_setup(cls):
- super(ContainerTest, cls).resource_setup()
- cls.containers = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(ContainerTest, cls).resource_cleanup()
-
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
self.assertHeaders(resp, 'Container', 'HEAD')
@@ -51,19 +41,23 @@
def test_versioned_container(self):
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
- resp, _ = self.container_client.create_container(
- vers_container_name)
- self.containers.append(vers_container_name)
+ resp, _ = self.container_client.update_container(vers_container_name)
+ self.addCleanup(base.delete_containers,
+ [vers_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
base_container_name = data_utils.rand_name(name='TestBaseContainer')
headers = {'X-versions-Location': vers_container_name}
- resp, _ = self.container_client.create_container(
+ resp, _ = self.container_client.update_container(
base_container_name,
- metadata=headers,
- metadata_prefix='')
- self.containers.append(base_container_name)
+ **headers)
+ self.addCleanup(base.delete_containers,
+ [base_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
new file mode 100644
index 0000000..45f4caa
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -0,0 +1,202 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseGroupSnapshotsTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
+ def _create_group_snapshot(self, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group_Snapshot')
+
+ group_snapshot = self.group_snapshots_client.create_group_snapshot(
+ **kwargs)['group_snapshot']
+ group_snapshot['group_id'] = kwargs['group_id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_group_snapshot, group_snapshot)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], 'available')
+ return group_snapshot
+
+ def _delete_group_snapshot(self, group_snapshot):
+ self.group_snapshots_client.delete_group_snapshot(group_snapshot['id'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for vol in vols:
+ for snap in snapshots:
+ if (vol['group_id'] == group_snapshot['group_id'] and
+ vol['id'] == snap['volume_id']):
+ self.snapshots_client.wait_for_resource_deletion(
+ snap['id'])
+ self.group_snapshots_client.wait_for_resource_deletion(
+ group_snapshot['id'])
+
+
+class GroupSnapshotsTest(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.14'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
+ def test_group_snapshot_create_show_list_delete(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get a given group snapshot
+ group_snapshot = self.group_snapshots_client.show_group_snapshot(
+ group_snapshot['id'])['group_snapshot']
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get all group snapshots with details, check some detail-specific
+ # elements, and look for the created group snapshot
+ group_snapshots = self.group_snapshots_client.list_group_snapshots(
+ detail=True)['group_snapshots']
+ for grp_snapshot in group_snapshots:
+ self.assertIn('created_at', grp_snapshot)
+ self.assertIn('group_id', grp_snapshot)
+ self.assertIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
+
+ # Delete group snapshot
+ self._delete_group_snapshot(group_snapshot)
+ group_snapshots = self.group_snapshots_client.list_group_snapshots()[
+ 'group_snapshots']
+ self.assertEmpty(group_snapshots)
+
+ @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
+ def test_create_group_from_group_snapshot(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group_snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Create Group from Group snapshot
+ grp_name2 = data_utils.rand_name('Group_from_snap')
+ grp2 = self.groups_client.create_group_from_source(
+ group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, vol['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp2['id'], 'available')
+
+
+class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.19'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
+ def test_reset_group_snapshot_status(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=group['id'])
+
+ # Create group snapshot
+ group_snapshot = self._create_group_snapshot(group_id=group['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if volume['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Reset group snapshot status
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.group_snapshots_client,
+ group_snapshot['id'], 'available')
+ self.addCleanup(
+ self.admin_group_snapshots_client.reset_group_snapshot_status,
+ group_snapshot['id'], 'available')
+ for status in ['creating', 'available', 'error']:
+ self.admin_group_snapshots_client.reset_group_snapshot_status(
+ group_snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], status)
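test_reset_group_snapshot_status cycles the snapshot through several states and waits for each one; the two cleanups are registered so that the reset back to 'available' runs before the final wait. The waiters call relies on a polling helper; a generic sketch of that kind of wait loop (timings and names are illustrative, not the Tempest implementation):

    import time

    def wait_for_status(get_status, wanted, timeout=60, interval=1):
        # Poll get_status() until it returns the wanted value or time runs out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_status() == wanted:
                return
            time.sleep(interval)
        raise TimeoutError('resource never reached status %r' % wanted)

    # Example with a fake resource that becomes 'available' after a few polls.
    states = iter(['creating', 'creating', 'available'])
    wait_for_status(lambda: next(states), 'available', timeout=5)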
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
new file mode 100644
index 0000000..c5e6d1a
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -0,0 +1,80 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
+ _api_version = 3
+ min_microversion = '3.11'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
+ def test_group_type_specs_create_show_update_list_delete(self):
+ # Create new group type
+ group_type = self.create_group_type()
+
+ # Create new group type specs
+ create_specs = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ body = self.admin_group_types_client.create_or_update_group_type_specs(
+ group_type['id'], create_specs)['group_specs']
+ self.assertEqual(create_specs, body)
+
+ # Create a new group type spec and update an existing group type spec
+ update_specs = {
+ "key2": "value2-updated",
+ "key3": "value3"
+ }
+ body = self.admin_group_types_client.create_or_update_group_type_specs(
+ group_type['id'], update_specs)['group_specs']
+ self.assertEqual(update_specs, body)
+
+ # Show specified item of group type specs
+ spec_keys = ['key2', 'key3']
+ for key in spec_keys:
+ body = self.admin_group_types_client.show_group_type_specs_item(
+ group_type['id'], key)
+ self.assertIn(key, body)
+ self.assertEqual(update_specs[key], body[key])
+
+ # Update specified item of group type specs
+ update_key = 'key3'
+ update_spec = {update_key: "value3-updated"}
+ body = self.admin_group_types_client.update_group_type_specs_item(
+ group_type['id'], update_key, update_spec)
+ self.assertEqual(update_spec, body)
+
+ # List all group type specs created or updated above
+ list_specs = {}
+ list_specs.update(create_specs)
+ list_specs.update(update_specs)
+ list_specs.update(update_spec)
+ body = self.admin_group_types_client.list_group_type_specs(
+ group_type['id'])['group_specs']
+ self.assertEqual(list_specs, body)
+
+ # Delete specified item of group type specs
+ delete_key = 'key1'
+ self.admin_group_types_client.delete_group_type_specs_item(
+ group_type['id'], delete_key)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.admin_group_types_client.show_group_type_specs_item,
+ group_type['id'], delete_key)
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 0df5fbd..6723207 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -24,7 +24,7 @@
max_microversion = 'latest'
@decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
- def test_group_type_create_list_show(self):
+ def test_group_type_create_list_update_show(self):
# Create/list/show group type.
name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
description = data_utils.rand_name("group-type-description")
@@ -46,8 +46,19 @@
self.assertIsInstance(group_list, list)
self.assertNotEmpty(group_list)
+ update_params = {
+ 'name': data_utils.rand_name(
+ self.__class__.__name__ + '-updated-group-type'),
+ 'description': 'updated-group-type-desc'
+ }
+ updated_group_type = self.admin_group_types_client.update_group_type(
+ body['id'], **update_params)['group_type']
+ for key, expected_val in update_params.items():
+ self.assertEqual(expected_val, updated_group_type[key])
+
fetched_group_type = self.admin_group_types_client.show_group_type(
body['id'])['group_type']
+ params.update(update_params) # Add updated params to original params.
for key in params.keys():
self.assertEqual(params[key], fetched_group_type[key],
'%s of the fetched group_type is different '
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 68d355c..2f6eb6b 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -17,54 +17,14 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
-class BaseGroupsTest(base.BaseVolumeAdminTest):
-
- def _delete_group(self, grp_id, delete_volumes=True):
- self.groups_client.delete_group(grp_id, delete_volumes)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp_id:
- self.volumes_client.wait_for_resource_deletion(vol['id'])
- self.groups_client.wait_for_resource_deletion(grp_id)
-
- def _delete_group_snapshot(self, group_snapshot_id, grp_id):
- self.group_snapshots_client.delete_group_snapshot(group_snapshot_id)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for vol in vols:
- for snap in snapshots:
- if (vol['group_id'] == grp_id and
- vol['id'] == snap['volume_id']):
- self.snapshots_client.wait_for_resource_deletion(
- snap['id'])
- self.group_snapshots_client.wait_for_resource_deletion(
- group_snapshot_id)
-
- def _create_group(self, group_type, volume_type, grp_name=None):
- if not grp_name:
- grp_name = data_utils.rand_name('Group')
- grp = self.groups_client.create_group(
- group_type=group_type['id'],
- volume_types=[volume_type['id']],
- name=grp_name)['group']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group, grp['id'])
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp['id'], 'available')
- self.assertEqual(grp_name, grp['name'])
- return grp
-
-
-class GroupsTest(BaseGroupsTest):
+class GroupsTest(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.14'
+ min_microversion = '3.13'
max_microversion = 'latest'
@decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
@@ -77,13 +37,15 @@
# Create group
grp1_name = data_utils.rand_name('Group1')
- grp1 = self._create_group(group_type, volume_type,
- grp_name=grp1_name)
+ grp1 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp1_name)
grp1_id = grp1['id']
grp2_name = data_utils.rand_name('Group2')
- grp2 = self._create_group(group_type, volume_type,
- grp_name=grp2_name)
+ grp2 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp2_name)
grp2_id = grp2['id']
# Create volume
@@ -108,16 +70,16 @@
self.assertEqual(grp2_id, grp2['id'])
# Get all groups with detail
- grps = self.groups_client.list_groups(
- detail=True)['groups']
- filtered_grps = [g for g in grps if g['id'] in [grp1_id, grp2_id]]
- self.assertEqual(2, len(filtered_grps))
- for grp in filtered_grps:
- self.assertEqual([volume_type['id']], grp['volume_types'])
- self.assertEqual(group_type['id'], grp['group_type'])
+ grps = self.groups_client.list_groups(detail=True)['groups']
+ for grp_id in [grp1_id, grp2_id]:
+ filtered_grps = [g for g in grps if g['id'] == grp_id]
+ self.assertEqual(1, len(filtered_grps))
+ self.assertEqual([volume_type['id']],
+ filtered_grps[0]['volume_types'])
+ self.assertEqual(group_type['id'],
+ filtered_grps[0]['group_type'])
- vols = self.volumes_client.list_volumes(
- detail=True)['volumes']
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
filtered_vols = [v for v in vols if v['id'] in [vol1_id]]
self.assertEqual(1, len(filtered_vols))
for vol in filtered_vols:
@@ -125,143 +87,12 @@
# Delete group
# grp1 has a volume so delete_volumes flag is set to True by default
- self._delete_group(grp1_id)
+ self.delete_group(grp1_id)
# grp2 is empty so delete_volumes flag can be set to False
- self._delete_group(grp2_id, delete_volumes=False)
+ self.delete_group(grp2_id, delete_volumes=False)
grps = self.groups_client.list_groups(detail=True)['groups']
self.assertEmpty(grps)
- @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
- def test_group_snapshot_create_show_list_delete(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get a given group snapshot
- group_snapshot = self.group_snapshots_client.show_group_snapshot(
- group_snapshot['id'])['group_snapshot']
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get all group snapshots with details, check some detail-specific
- # elements, and look for the created group snapshot
- group_snapshots = (self.group_snapshots_client.list_group_snapshots(
- detail=True)['group_snapshots'])
- for grp_snapshot in group_snapshots:
- self.assertIn('created_at', grp_snapshot)
- self.assertIn('group_id', grp_snapshot)
- self.assertIn((group_snapshot['name'], group_snapshot['id']),
- [(m['name'], m['id']) for m in group_snapshots])
-
- # Delete group snapshot
- self._delete_group_snapshot(group_snapshot['id'], grp['id'])
- group_snapshots = (self.group_snapshots_client.list_group_snapshots()
- ['group_snapshots'])
- self.assertEmpty(group_snapshots)
-
- @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
- def test_create_group_from_group_snapshot(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group_snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Create Group from Group snapshot
- grp_name2 = data_utils.rand_name('Group_from_snap')
- grp2 = self.groups_client.create_group_from_source(
- group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
- @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
- def test_create_group_from_group(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
-
- # Create Group from Group
- grp_name2 = data_utils.rand_name('Group_from_grp')
- grp2 = self.groups_client.create_group_from_source(
- source_group_id=grp['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
@decorators.idempotent_id('4a8a6fd2-8b3b-4641-8f54-6a6f99320006')
def test_group_update(self):
# Create volume type
@@ -271,7 +102,8 @@
group_type = self.create_group_type()
# Create Group
- grp = self._create_group(group_type, volume_type)
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volumes
grp_vols = []
@@ -299,8 +131,7 @@
self.assertEqual(new_desc, grp['description'])
# Get volumes in the group
- vols = self.volumes_client.list_volumes(
- detail=True)['volumes']
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
grp_vols = [v for v in vols if v['group_id'] == grp['id']]
self.assertEqual(1, len(grp_vols))
@@ -318,56 +149,42 @@
self.assertEqual(2, len(grp_vols))
-class GroupsV319Test(BaseGroupsTest):
+class GroupsV314Test(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.19'
+ min_microversion = '3.14'
max_microversion = 'latest'
- @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
- def test_reset_group_snapshot_status(self):
+ @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
+ def test_create_group_from_group(self):
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
- # Create group
- group = self._create_group(group_type, volume_type)
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volume
- volume = self.create_volume(volume_type=volume_type['id'],
- group_id=group['id'])
+ self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (self.group_snapshots_client.create_group_snapshot(
- group_id=group['id'], name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], group['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if volume['id'] == snap['volume_id']:
+ # Create Group from Group
+ grp_name2 = data_utils.rand_name('Group_from_grp')
+ grp2 = self.groups_client.create_group_from_source(
+ source_group_id=grp['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
+ self.volumes_client, vol['id'], 'available')
waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Reset group snapshot status
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.addCleanup(
- self.admin_group_snapshots_client.reset_group_snapshot_status,
- group_snapshot['id'], 'available')
- for status in ['creating', 'available', 'error']:
- self.admin_group_snapshots_client.reset_group_snapshot_status(
- group_snapshot['id'], status)
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], status)
+ self.groups_client, grp2['id'], 'available')
-class GroupsV320Test(BaseGroupsTest):
+class GroupsV320Test(base.BaseVolumeAdminTest):
_api_version = 3
min_microversion = '3.20'
max_microversion = 'latest'
@@ -381,7 +198,8 @@
group_type = self.create_group_type()
# Create group
- group = self._create_group(group_type, volume_type)
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Reset group status
self.addCleanup(waiters.wait_for_volume_resource_status,
diff --git a/tempest/api/volume/admin/test_snapshot_manage.py b/tempest/api/volume/admin/test_snapshot_manage.py
index 9ff7160..37a47ec 100644
--- a/tempest/api/volume/admin/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/test_snapshot_manage.py
@@ -35,6 +35,9 @@
def skip_checks(cls):
super(SnapshotManageAdminTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
if not CONF.volume_feature_enabled.manage_snapshot:
raise cls.skipException("Manage snapshot tests are disabled")
@@ -60,7 +63,7 @@
# Verify the original snapshot does not exist in snapshot list
params = {'all_tenants': 1}
all_snapshots = self.admin_snapshots_client.list_snapshots(
- detail=True, params=params)['snapshots']
+ detail=True, **params)['snapshots']
self.assertNotIn(snapshot['id'], [v['id'] for v in all_snapshots])
# Manage the snapshot
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index d56f1de..6f9daa8 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -19,7 +19,8 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups']
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use']
@@ -37,7 +38,6 @@
def setup_credentials(cls):
super(BaseVolumeQuotasAdminTestJSON, cls).setup_credentials()
cls.demo_tenant_id = cls.os_primary.credentials.tenant_id
- cls.alt_client = cls.os_alt.volumes_client_latest
@classmethod
def setup_clients(cls):
@@ -67,7 +67,9 @@
new_quota_set = {'gigabytes': 1009,
'volumes': 11,
'snapshots': 11,
- 'backups': 11}
+ 'backups': 11,
+ 'backup_gigabytes': 1009,
+ 'per_volume_gigabytes': 1009}
# Update limits for all quota resources
quota_set = self.admin_quotas_client.update_quota_set(
@@ -147,7 +149,8 @@
self.demo_tenant_id, params={'usage': True})['quota_set']
alt_quota = self.admin_quotas_client.show_quota_set(
- self.alt_client.tenant_id, params={'usage': True})['quota_set']
+ self.os_alt.volumes_client_latest.tenant_id,
+ params={'usage': True})['quota_set']
# Creates a volume transfer
transfer = self.transfer_client.create_volume_transfer(
@@ -161,14 +164,15 @@
# Verify volume transferred is available
waiters.wait_for_volume_resource_status(
- self.alt_client, volume['id'], 'available')
+ self.os_alt.volumes_client_latest, volume['id'], 'available')
# List of tenants quota usage post transfer
new_primary_quota = self.admin_quotas_client.show_quota_set(
self.demo_tenant_id, params={'usage': True})['quota_set']
new_alt_quota = self.admin_quotas_client.show_quota_set(
- self.alt_client.tenant_id, params={'usage': True})['quota_set']
+ self.os_alt.volumes_client_latest.tenant_id,
+ params={'usage': True})['quota_set']
# Verify tenants quota usage was updated
self.assertEqual(primary_quota['volumes']['in_use'] -
diff --git a/tempest/api/volume/admin/test_volume_type_access.py b/tempest/api/volume/admin/test_volume_type_access.py
index e93bcb5..b64face 100644
--- a/tempest/api/volume/admin/test_volume_type_access.py
+++ b/tempest/api/volume/admin/test_volume_type_access.py
@@ -27,11 +27,6 @@
credentials = ['primary', 'alt', 'admin']
- @classmethod
- def setup_clients(cls):
- super(VolumeTypesAccessTest, cls).setup_clients()
- cls.alt_client = cls.os_alt.volumes_client_latest
-
@decorators.idempotent_id('d4dd0027-835f-4554-a6e5-50903fb79184')
def test_volume_type_access_add(self):
# Creating a NON public volume type
@@ -70,10 +65,11 @@
# Adding volume type access for alt tenant
self.admin_volume_types_client.add_type_access(
- volume_type['id'], project=self.alt_client.tenant_id)
+ volume_type['id'],
+ project=self.os_alt.volumes_client_latest.tenant_id)
self.addCleanup(self.admin_volume_types_client.remove_type_access,
volume_type['id'],
- project=self.alt_client.tenant_id)
+ project=self.os_alt.volumes_client_latest.tenant_id)
# List tenant access for the given volume type
type_access_list = self.admin_volume_types_client.list_type_access(
@@ -88,5 +84,5 @@
# Validating the permitted tenants are the expected tenants
self.assertIn(self.volumes_client.tenant_id,
map(operator.itemgetter('project_id'), type_access_list))
- self.assertIn(self.alt_client.tenant_id,
+ self.assertIn(self.os_alt.volumes_client_latest.tenant_id,
map(operator.itemgetter('project_id'), type_access_list))
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index af1024c..1077524 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -161,6 +161,12 @@
'The fetched encryption_type %s is different '
'from the updated encryption_type' % key)
+ # Get encryption specs item
+ key = 'cipher'
+ item = self.admin_encryption_types_client.show_encryption_specs_item(
+ encrypt_type_id, key)
+ self.assertEqual(update_kwargs[key], item[key])
+
# Delete encryption type
self.admin_encryption_types_client.delete_encryption_type(
encrypt_type_id)
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index b5a2fb7..730acdf 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -46,14 +46,32 @@
self.volume_type['id'], extra_specs)['extra_specs']
self.assertEqual(extra_specs, body,
"Volume type extra spec incorrectly created")
+
+ # Update a single existing extra spec
spec_key = "spec2"
extra_spec = {spec_key: "val2"}
body = self.admin_volume_types_client.update_volume_type_extra_specs(
self.volume_type['id'], spec_key, extra_spec)
self.assertIn(spec_key, body)
+ self.assertEqual(extra_spec[spec_key], body[spec_key])
+ body = self.admin_volume_types_client.show_volume_type_extra_specs(
+ self.volume_type['id'], spec_key)
+ self.assertIn(spec_key, body)
self.assertEqual(extra_spec[spec_key], body[spec_key],
"Volume type extra spec incorrectly updated")
+ # Update an existing extra spec and create a new extra spec
+ extra_specs = {spec_key: "val3", "spec4": "val4"}
+ body = self.admin_volume_types_client.create_volume_type_extra_specs(
+ self.volume_type['id'], extra_specs)['extra_specs']
+ self.assertEqual(extra_specs, body)
+ body = self.admin_volume_types_client.list_volume_types_extra_specs(
+ self.volume_type['id'])['extra_specs']
+ for key in extra_specs:
+ self.assertIn(key, body)
+ self.assertEqual(extra_specs[key], body[key],
+ "Volume type extra spec incorrectly created")
+
@decorators.idempotent_id('d4772798-601f-408a-b2a5-29e8a59d1220')
def test_volume_type_extra_spec_create_get_delete(self):
# Create/Get/Delete volume type extra spec.
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 8d09217..3e0deef 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -37,7 +37,7 @@
@decorators.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
def test_volume_reset_status(self):
- # test volume reset status : available->error->available
+ # test volume reset status : available->error->available->maintenance
volume = self.create_volume()
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client, volume['id'], 'available')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 63ef85b..81fd6e6 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -31,6 +31,11 @@
"""Base test case class for all Cinder API tests."""
_api_version = 2
+ # If api_v2 is disabled while api_v3 is enabled, the volume v2 test
+ # classes should run against the volume v3 API instead.
+ if (not CONF.volume_feature_enabled.api_v2 and
+ CONF.volume_feature_enabled.api_v3):
+ _api_version = 3
credentials = ['primary']
@classmethod
@@ -101,20 +106,12 @@
cls.min_microversion,
CONF.volume.min_microversion))
- cls.snapshots = []
- cls.volumes = []
cls.image_ref = CONF.compute.image_ref
cls.flavor_ref = CONF.compute.flavor_ref
cls.build_interval = CONF.volume.build_interval
cls.build_timeout = CONF.volume.build_timeout
@classmethod
- def resource_cleanup(cls):
- cls.clear_snapshots()
- cls.clear_volumes()
- super(BaseVolumeTest, cls).resource_cleanup()
-
- @classmethod
def create_volume(cls, wait_until='available', **kwargs):
"""Wrapper utility that returns a test volume.
@@ -133,7 +130,9 @@
kwargs['name'] = name
volume = cls.volumes_client.create_volume(**kwargs)['volume']
- cls.volumes.append(volume)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_volume, cls.volumes_client,
+ volume['id'])
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], wait_until)
return volume
@@ -147,7 +146,8 @@
snapshot = cls.snapshots_client.create_snapshot(
volume_id=volume_id, **kwargs)['snapshot']
- cls.snapshots.append(snapshot['id'])
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(cls.snapshots_client,
snapshot['id'], 'available')
return snapshot
@@ -176,14 +176,13 @@
client.delete_volume(volume_id)
client.wait_for_resource_deletion(volume_id)
- def delete_snapshot(self, snapshot_id, snapshots_client=None):
+ @classmethod
+ def delete_snapshot(cls, snapshot_id, snapshots_client=None):
"""Delete snapshot by the given client"""
if snapshots_client is None:
- snapshots_client = self.snapshots_client
+ snapshots_client = cls.snapshots_client
snapshots_client.delete_snapshot(snapshot_id)
snapshots_client.wait_for_resource_deletion(snapshot_id)
- if snapshot_id in self.snapshots:
- self.snapshots.remove(snapshot_id)
def attach_volume(self, server_id, volume_id):
"""Attach a volume to a server"""
@@ -197,31 +196,6 @@
self.addCleanup(self.servers_client.detach_volume, server_id,
volume_id)
- @classmethod
- def clear_volumes(cls):
- for volume in cls.volumes:
- try:
- cls.volumes_client.delete_volume(volume['id'])
- except Exception:
- pass
-
- for volume in cls.volumes:
- try:
- cls.volumes_client.wait_for_resource_deletion(volume['id'])
- except Exception:
- pass
-
- @classmethod
- def clear_snapshots(cls):
- for snapshot in cls.snapshots:
- test_utils.call_and_ignore_notfound_exc(
- cls.snapshots_client.delete_snapshot, snapshot)
-
- for snapshot in cls.snapshots:
- test_utils.call_and_ignore_notfound_exc(
- cls.snapshots_client.wait_for_resource_deletion,
- snapshot)
-
def create_server(self, wait_until='ACTIVE', **kwargs):
name = kwargs.pop(
'name',
@@ -242,6 +216,27 @@
self.servers_client.delete_server, body['id'])
return body
+ def create_group(self, **kwargs):
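+ # Generate a readable default name, create the group, register a
+ # best-effort cleanup, and wait for the group to become available.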
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group')
+
+ group = self.groups_client.create_group(**kwargs)['group']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.delete_group, group['id'])
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, group['id'], 'available')
+ return group
+
+ def delete_group(self, group_id, delete_volumes=True):
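+ # With delete_volumes=True the group's member volumes are removed
+ # together with the group, so wait for each of them to be deleted
+ # before waiting on the group itself.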
+ self.groups_client.delete_group(group_id, delete_volumes)
+ if delete_volumes:
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == group_id:
+ self.volumes_client.wait_for_resource_deletion(vol['id'])
+ self.groups_client.wait_for_resource_deletion(group_id)
+
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
@@ -282,26 +277,13 @@
cls.os_admin.volume_scheduler_stats_v2_client
@classmethod
- def resource_setup(cls):
- super(BaseVolumeAdminTest, cls).resource_setup()
-
- cls.qos_specs = []
- cls.volume_types = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.clear_qos_specs()
- super(BaseVolumeAdminTest, cls).resource_cleanup()
- cls.clear_volume_types()
-
- @classmethod
def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
"""create a test Qos-Specs."""
name = name or data_utils.rand_name(cls.__name__ + '-QoS')
consumer = consumer or 'front-end'
qos_specs = cls.admin_volume_qos_client.create_qos(
name=name, consumer=consumer, **kwargs)['qos_specs']
- cls.qos_specs.append(qos_specs['id'])
+ cls.addClassResourceCleanup(cls.clear_qos_spec, qos_specs['id'])
return qos_specs
@classmethod
@@ -310,7 +292,7 @@
name = name or data_utils.rand_name(cls.__name__ + '-volume-type')
volume_type = cls.admin_volume_types_client.create_volume_type(
name=name, **kwargs)['volume_type']
- cls.volume_types.append(volume_type['id'])
+ cls.addClassResourceCleanup(cls.clear_volume_type, volume_type['id'])
return volume_type
def create_group_type(self, name=None, **kwargs):
@@ -324,22 +306,18 @@
return group_type
@classmethod
- def clear_qos_specs(cls):
- for qos_id in cls.qos_specs:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_volume_qos_client.delete_qos, qos_id)
+ def clear_qos_spec(cls, qos_id):
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_qos_client.delete_qos, qos_id)
- for qos_id in cls.qos_specs:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_volume_qos_client.wait_for_resource_deletion, qos_id)
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_qos_client.wait_for_resource_deletion, qos_id)
@classmethod
- def clear_volume_types(cls):
- for vol_type in cls.volume_types:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_volume_types_client.delete_volume_type, vol_type)
+ def clear_volume_type(cls, vol_type_id):
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_types_client.delete_volume_type, vol_type_id)
- for vol_type in cls.volume_types:
- test_utils.call_and_ignore_notfound_exc(
- cls.admin_volume_types_client.wait_for_resource_deletion,
- vol_type)
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_types_client.wait_for_resource_deletion,
+ vol_type_id)
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 1e240b8..552b231 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -40,7 +40,7 @@
backup_id)['restore']
# Delete backup
- self.addCleanup(self.volumes_client.delete_volume,
+ self.addCleanup(self.delete_volume, self.volumes_client,
restored_volume['volume_id'])
self.assertEqual(backup_id, restored_volume['backup_id'])
waiters.wait_for_volume_resource_status(self.backups_client,
@@ -59,8 +59,7 @@
"vol-meta2": "value2",
"vol-meta3": "value3"}
volume = self.create_volume(metadata=metadata)
- self.addCleanup(self.volumes_client.delete_volume,
- volume['id'])
+ self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
# Create a backup
backup_name = data_utils.rand_name(
@@ -109,8 +108,7 @@
"""
# Create a server
volume = self.create_volume()
- self.addCleanup(self.volumes_client.delete_volume,
- volume['id'])
+ self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
server = self.create_server()
# Attach volume to instance
self.attach_volume(server['id'], volume['id'])
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 1eb76a0..54052ae 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -13,12 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
import testtools
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -53,3 +57,125 @@
resized_volume = self.volumes_client.show_volume(
volume['id'])['volume']
self.assertEqual(extend_size, resized_volume['size'])
+
+
+class VolumesExtendAttachedTest(base.BaseVolumeTest):
+ """Tests extending the size of an attached volume."""
+
+ # We need admin credentials for getting instance action event details. By
+ # default a non-admin can list and show instance actions if they own the
+ # server instance, but since the event details can contain error messages
+ # and tracebacks, like an instance fault, those are not viewable by
+ # non-admins. This is obviously not a great user experience since the user
+ # may not know when the operation is actually complete. A microversion in
+ # the compute API will be added so that non-admins can see instance action
+ # events but will continue to hide the traceback field.
+ # TODO(mriedem): Change this to not rely on the admin user to get the event
+ # details once that microversion is available in Nova.
+ credentials = ['primary', 'admin']
+
+ _api_version = 3
+ # NOTE(mriedem): The minimum required volume API version is 3.42 and the
+ # minimum required compute API microversion is 2.51, but the compute call
+ # is implicit - Cinder calls Nova at that microversion, Tempest does not.
+ min_microversion = '3.42'
+
+ def _find_extend_volume_instance_action(self, server_id):
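+ # Scan the server's instance actions for an 'extend_volume' action;
+ # return None if Nova has not recorded one yet.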
+ actions = self.servers_client.list_instance_actions(
+ server_id)['instanceActions']
+ for action in actions:
+ if action['action'] == 'extend_volume':
+ return action
+
+ def _find_extend_volume_instance_action_finish_event(self, action):
+ # This has to be called by an admin client otherwise
+ # the events don't show up.
+ action = self.os_admin.servers_client.show_instance_action(
+ action['instance_uuid'], action['request_id'])['instanceAction']
+ for event in action['events']:
+ if (event['event'] == 'compute_extend_volume' and
+ event['finish_time']):
+ return event
+
+ @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
+ @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
+ "Attached volume extend is disabled.")
+ @utils.services('compute')
+ def test_extend_attached_volume(self):
+ """This is a happy path test which does the following:
+
+ * Create a volume at the configured volume_size.
+ * Create a server instance.
+ * Attach the volume to the server.
+ * Wait for the volume status to be "in-use".
+ * Extend the size of the volume and wait for the volume status to go
+ back to "in-use".
+ * Assert the volume size change is reflected in the volume API.
+ * Wait for the "compute_extend_volume" instance action event to show
+ up in the compute API with the success or failure status. We fail
+ if we timeout waiting for the instance action event to show up, or
+ if the action on the server fails.
+ """
+ # Create a test volume. Will be automatically cleaned up on teardown.
+ volume = self.create_volume()
+ # Create a test server. Will be automatically cleaned up on teardown.
+ server = self.create_server()
+ # Attach the volume to the server and wait for the volume status to be
+ # "in-use".
+ self.attach_volume(server['id'], volume['id'])
+ # Extend the size of the volume. If this is successful, the volume API
+ # will change the status on the volume to "extending" before doing an
+ # RPC cast to the volume manager on the backend. Note that we multiply
+ # the size of the volume since certain Cinder backends, e.g. ScaleIO,
+ # require multiples of 8GB.
+ extend_size = volume['size'] * 2
+ self.volumes_client.extend_volume(volume['id'], new_size=extend_size)
+ # The volume status should go back to in-use since it is still attached
+ # to the server instance.
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
+ # Assert that the volume size has changed in the volume API.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ self.assertEqual(extend_size, volume['size'])
+ # Now we wait for the "compute_extend_volume" instance action event
+ # to show up for the server instance. This is our indication that the
+ # asynchronous operation is complete on the compute side.
+ start_time = int(time.time())
+ timeout = self.servers_client.build_timeout
+ action = self._find_extend_volume_instance_action(server['id'])
+ while action is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ action = self._find_extend_volume_instance_action(server['id'])
+
+ if action is None:
+ msg = ("Timed out waiting to get 'extend_volume' instance action "
+ "record for server %(server)s after %(timeout)s seconds." %
+ {'server': server['id'], 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Now that we found the extend_volume instance action, we can wait for
+ # the compute_extend_volume instance action event to show up to
+ # indicate the operation is complete.
+ start_time = int(time.time())
+ event = self._find_extend_volume_instance_action_finish_event(action)
+ while event is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ event = self._find_extend_volume_instance_action_finish_event(
+ action)
+
+ if event is None:
+ msg = ("Timed out waiting to get 'compute_extend_volume' instance "
+ "action event record for server %(server)s and request "
+ "%(request_id)s after %(timeout)s seconds." %
+ {'server': server['id'],
+ 'request_id': action['request_id'],
+ 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Finally, assert that the action completed successfully.
+ self.assertTrue(
+ event['result'].lower() == 'success',
+ "Unexpected compute_extend_volume result '%(result)s' for request "
+ "%(request_id)s." %
+ {'result': event['result'],
+ 'request_id': action['request_id']})
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index b5f98ea..d5358ab 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -26,15 +26,28 @@
class VolumesListTestJSON(base.BaseVolumeTest):
- # NOTE: This test creates a number of 1G volumes. To run successfully,
- # ensure that the backing file for the volume group that Nova uses
+ # NOTE: This test creates a number of 1G volumes. To run it successfully,
+ # ensure that the backing file for the volume group that Cinder uses
# has space for at least 3 1G volumes!
# If you are running a Devstack environment, ensure that the
# VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
VOLUME_FIELDS = ('id', 'name')
- def assertVolumesIn(self, fetched_list, expected_list, fields=None):
+ @classmethod
+ def _remove_volatile_fields(cls, fetched_list):
+ """Remove fields that should not be compared.
+
+ This method makes sure that Tempest does not compare e.g.
+ the volume's "updated_at" field that may change for any reason
+ internal to the operation of Cinder.
+ """
+ for volume in fetched_list:
+ for field in ('updated_at',):
+ if field in volume:
+ del volume[field]
+
+ def _assert_volumes_in(self, fetched_list, expected_list, fields=None):
"""Check out the list.
This function aims to check whether all of the volumes in
@@ -45,6 +58,8 @@
expected_list = map(fieldsgetter, expected_list)
fetched_list = [fieldsgetter(item) for item in fetched_list]
+ # The expected_list is assumed to have been cleaned already.
+ self._remove_volatile_fields(fetched_list)
missing_vols = [v for v in expected_list if v not in fetched_list]
if not missing_vols:
return
@@ -72,6 +87,7 @@
volume = cls.volumes_client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
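+ # Drop volatile fields such as 'updated_at' from the reference list up
+ # front so later comparisons against freshly fetched volumes are stable.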
+ cls._remove_volatile_fields(cls.volume_list)
def _list_by_param_value_and_assert(self, params, with_detail=False):
"""list or list_details with given params and validates result"""
@@ -103,15 +119,15 @@
# Get a list of Volumes
# Fetch all volumes
fetched_list = self.volumes_client.list_volumes()['volumes']
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('adcbb5a7-5ad8-4b61-bd10-5380e111a877')
def test_volume_list_with_details(self):
# Get a list of Volumes with details
# Fetch all Volumes
fetched_list = self.volumes_client.list_volumes(detail=True)['volumes']
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('a28e8da4-0b56-472f-87a8-0f4d3f819c02')
def test_volume_list_by_name(self):
@@ -137,8 +153,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('2943f712-71ec-482a-bf49-d5ca06216b9f')
def test_volumes_list_details_by_status(self):
@@ -147,7 +163,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual('available', volume['status'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('2016a942-3020-40d7-95ce-7613bf8407ce')
def test_volumes_list_by_bootable(self):
@@ -160,8 +176,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('2016a939-72ec-482a-bf49-d5ca06216b9f')
def test_volumes_list_details_by_bootable(self):
@@ -170,7 +186,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual('false', volume['bootable'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('c0cfa863-3020-40d7-b587-e35f597d5d87')
def test_volumes_list_by_availability_zone(self):
@@ -180,8 +196,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('e1b80d13-94f0-4ba2-a40e-386af29f8db1')
def test_volumes_list_details_by_availability_zone(self):
@@ -192,7 +208,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual(zone, volume['availability_zone'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('b5ebea1b-0603-40a0-bb41-15fcd0a53214')
def test_volume_list_with_param_metadata(self):
diff --git a/tempest/clients.py b/tempest/clients.py
index e617c3c..b06eafb 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -17,7 +17,6 @@
from tempest.lib import auth
from tempest.lib import exceptions as lib_exc
from tempest.lib.services import clients
-from tempest.services import object_storage
CONF = config.CONF
@@ -25,8 +24,6 @@
class Manager(clients.ServiceClients):
"""Top level manager for OpenStack tempest clients"""
- default_params = config.service_client_config()
-
def __init__(self, credentials, scope='project'):
"""Initialization of Manager class.
@@ -47,6 +44,10 @@
self._set_object_storage_clients()
self._set_image_clients()
self._set_network_clients()
+ # TODO(andreaf) This is maintained for backward compatibility
+ # with plugins, but it should be removed eventually, since it was
+ # never a stable interface and it's not useful anyway
+ self.default_params = config.service_client_config()
def _set_network_clients(self):
self.network_agents_client = self.network.AgentsClient()
@@ -233,7 +234,9 @@
self.volumes_client = self.volume_v1.VolumesClient()
self.volumes_extension_client = self.volume_v1.ExtensionsClient()
- if CONF.volume_feature_enabled.api_v2:
+ # if only api_v3 is enabled, all these clients should be available
+ if (CONF.volume_feature_enabled.api_v2 or
+ CONF.volume_feature_enabled.api_v3):
self.backups_v2_client = self.volume_v2.BackupsClient()
self.encryption_types_v2_client = \
self.volume_v2.EncryptionTypesClient()
@@ -281,21 +284,11 @@
self.snapshots_client_latest = self.snapshots_v3_client
def _set_object_storage_clients(self):
- # NOTE(andreaf) Load configuration from config. Once object storage
- # is in lib, configuration will be pulled directly from the registry
- # and this will not be required anymore.
- params = config.service_client_config('object-storage')
-
- self.account_client = object_storage.AccountClient(self.auth_provider,
- **params)
- self.bulk_client = object_storage.BulkMiddlewareClient(
- self.auth_provider, **params)
- self.capabilities_client = object_storage.CapabilitiesClient(
- self.auth_provider, **params)
- self.container_client = object_storage.ContainerClient(
- self.auth_provider, **params)
- self.object_client = object_storage.ObjectClient(self.auth_provider,
- **params)
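+ # Object storage clients are now provided by the service client
+ # registry, so no per-client configuration lookup is needed here.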
+ self.account_client = self.object_storage.AccountClient()
+ self.bulk_client = self.object_storage.BulkMiddlewareClient()
+ self.capabilities_client = self.object_storage.CapabilitiesClient()
+ self.container_client = self.object_storage.ContainerClient()
+ self.object_client = self.object_storage.ObjectClient()
def get_auth_provider_class(credentials):
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 8636405..1c671ec 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -15,18 +15,18 @@
# under the License.
"""
-Utility for creating **accounts.yaml** file for concurrent test runs.
+Utility for creating ``accounts.yaml`` file for concurrent test runs.
Creates one primary user, one alt user, one swift admin, one stack owner
and one admin (optionally) for each concurrent thread. The utility creates
-user for each tenant. The **accounts.yaml** file will be valid and contain
+a user for each tenant. The ``accounts.yaml`` file will be valid and contain
credentials for created users, so each user will be in a separate tenant and
have the username, tenant_name, password and roles.
-**Usage:** ``tempest account-generator [-h] [OPTIONS] accounts_file.yaml``.
+**Usage:** ``tempest account-generator [-h] [OPTIONS] accounts_file.yaml``
Positional Arguments
--------------------
-**accounts_file.yaml** (Required) Provide an output accounts yaml file. Utility
+``accounts_file.yaml`` (Required) Provide an output accounts yaml file. Utility
creates a .yaml file in the directory where the command is run. The appropriate
name for the file is *accounts.yaml* and it should be placed in *tempest/etc*
directory.
@@ -40,55 +40,62 @@
You're probably familiar with these, but just as a reminder:
-======== ======================== ====================
-Param CLI Environment Variable
-======== ======================== ====================
-Username --os-username OS_USERNAME
-Password --os-password OS_PASSWORD
-Project --os-project-name OS_PROJECT_NAME
-Tenant --os-tenant-name (depr.) OS_TENANT_NAME
-Domain --os-domain-name OS_DOMAIN_NAME
-======== ======================== ====================
+======== ============================ ====================
+Param CLI Environment Variable
+======== ============================ ====================
+Username ``--os-username`` OS_USERNAME
+Password ``--os-password`` OS_PASSWORD
+Project ``--os-project-name`` OS_PROJECT_NAME
+Tenant ``--os-tenant-name`` (depr.) OS_TENANT_NAME
+Domain ``--os-domain-name`` OS_DOMAIN_NAME
+======== ============================ ====================
Optional Arguments
------------------
-**-h**, **--help** (Optional) Shows help message with the description of
-utility and its arguments, and exits.
+* ``-h, --help`` (Optional) Shows help message with the description of
+ the utility and its arguments, and exits.
-**c /etc/tempest.conf**, **--config-file /etc/tempest.conf** (Optional) Path to
-tempest config file.
+* ``-c, --config-file /etc/tempest.conf`` (Optional) Path
+ to tempest config file. If not specified, it searches for tempest.conf in
+ these locations:
-**--os-username <auth-user-name>** (Optional) Name used for authentication with
-the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User should
-have permissions to create new user accounts and tenants.
+ - ./etc/
+ - /etc/tempest
+ - ~/.tempest/
+ - ~/
+ - /etc/
-**--os-password <auth-password>** (Optional) Password used for authentication
-with the OpenStack Identity service. Defaults to env[OS_PASSWORD].
+* ``--os-username <auth-user-name>`` (Optional) Name used for authentication
+ with the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User
+ should have permissions to create new user accounts and tenants.
-**--os-project-name <auth-project-name>** (Optional) Project to request
-authorization on. Defaults to env[OS_PROJECT_NAME].
+* ``--os-password <auth-password>`` (Optional) Password used for authentication
+ with the OpenStack Identity service. Defaults to env[OS_PASSWORD].
-**--os-tenant-name <auth-tenant-name>** (Optional, deprecated) Tenant to
-request authorization on. Defaults to env[OS_TENANT_NAME].
+* ``--os-project-name <auth-project-name>`` (Optional) Project to request
+ authorization on. Defaults to env[OS_PROJECT_NAME].
-**--os-domain-name <auth-domain-name>** (Optional) Domain the user and project
-belong to. Defaults to env[OS_DOMAIN_NAME].
+* ``--os-tenant-name <auth-tenant-name>`` (Optional, deprecated) Tenant to
+ request authorization on. Defaults to env[OS_TENANT_NAME].
-**--tag TAG** (Optional) Resources tag. Each created resource (user, project)
-will have the prefix with the given TAG in its name. Using tag is recommended
-for the further using, cleaning resources.
+* ``--os-domain-name <auth-domain-name>`` (Optional) Domain the user and
+ project belong to. Defaults to env[OS_DOMAIN_NAME].
-**-r CONCURRENCY**, **--concurrency CONCURRENCY** (Required) Concurrency count
-(default: 1). The number of accounts required can be estimated as
-CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
-a different tenant. This is required to provide isolation between test for
-running in parallel.
+* ``--tag TAG`` (Optional) Resources tag. Each created resource (user, project)
+ will have the given TAG prefixed to its name. Using a tag is recommended
+ so the resources can be identified and cleaned up more easily later.
-**--with-admin** (Optional) Creates admin for each concurrent group
-(default: False).
+* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count
+ (default: 1). The number of accounts required can be estimated as
+ CONCURRENCY x 2. Each user provided in the *accounts.yaml* file will be in
+ a different tenant. This is required to provide isolation between tests
+ running in parallel.
-**-i VERSION**, **--identity-version VERSION** (Optional) Provisions accounts
-using the specified version of the identity API. (default: '3').
+* ``--with-admin`` (Optional) Creates admin for each concurrent group
+ (default: False).
+
+* ``-i, --identity-version VERSION`` (Optional) Provisions accounts
+ using the specified version of the identity API. (default: '3').
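+
+Example invocation (the option values shown are illustrative only)::
+
+ $ tempest account-generator -c etc/tempest.conf -r 2 --with-admin etc/accounts.yaml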
To see help on a specific argument, please do: ``tempest account-generator
[OPTIONS] <accounts_file.yaml> -h``.
@@ -155,9 +162,7 @@
if CONF.service_available.swift:
spec.append([CONF.object_storage.operator_role])
spec.append([CONF.object_storage.reseller_admin_role])
- if CONF.service_available.heat:
- spec.append([CONF.orchestration.stack_owner_role,
- CONF.object_storage.operator_role])
+ spec.append([CONF.object_storage.operator_role])
if admin:
spec.append('admin')
resources = []
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index a128b3f..29abd49 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -28,45 +28,48 @@
Example Run
-----------
-**WARNING: If step 1 is skipped in the example below, the cleanup procedure
-may delete resources that existed in the cloud before the test run. This
-may cause an unwanted destruction of cloud resources, so use caution with
-this command.**
+.. warning::
-``$ tempest cleanup --init-saved-state``
+ If step 1 is skipped in the example below, the cleanup procedure
+ may delete resources that existed in the cloud before the test run. This
+ may cause an unwanted destruction of cloud resources, so use caution with
+ this command.
-``$ # Actual running of Tempest tests``
+ Examples::
-``$ tempest cleanup``
+ $ tempest cleanup --init-saved-state
+ $ # Actual running of Tempest tests
+ $ tempest cleanup
Runtime Arguments
-----------------
-**--init-saved-state**: Initializes the saved state of the OpenStack deployment
-and will output a ``saved_state.json`` file containing resources from your
-deployment that will be preserved from the cleanup command. This should be
-done prior to running Tempest tests.
+* ``--init-saved-state``: Initializes the saved state of the OpenStack
+ deployment and will output a ``saved_state.json`` file containing resources
+ from your deployment that will be preserved from the cleanup command. This
+ should be done prior to running Tempest tests.
-**--delete-tempest-conf-objects**: If option is present, then the command will
-delete the admin project in addition to the resources associated with them on
-clean up. If option is not present, the command will delete the resources
-associated with the Tempest and alternate Tempest users and projects but will
-not delete the projects themselves.
+* ``--delete-tempest-conf-objects``: If option is present, then the command
+ will delete the admin project in addition to the resources associated with
+ them on clean up. If option is not present, the command will delete the
+ resources associated with the Tempest and alternate Tempest users and
+ projects but will not delete the projects themselves.
-**--dry-run**: Creates a report (``./dry_run.json``) of the projects that will
-be cleaned up (in the ``_tenants_to_clean`` dictionary [1]_) and the global
-objects that will be removed (domains, flavors, images, roles, projects,
-and users). Once the cleanup command is executed (e.g. run without
-parameters), running it again with **--dry-run** should yield an empty report.
+* ``--dry-run``: Creates a report (``./dry_run.json``) of the projects that
+ will be cleaned up (in the ``_projects_to_clean`` dictionary [1]_) and the
+ global objects that will be removed (domains, flavors, images, roles,
+ projects, and users). Once the cleanup command is executed (e.g. run without
+ parameters), running it again with ``--dry-run`` should yield an empty
+ report.
-**--help**: Print the help text for the command and parameters.
+* ``--help``: Print the help text for the command and parameters.
-.. [1] The ``_tenants_to_clean`` dictionary in ``dry_run.json`` lists the
+.. [1] The ``_projects_to_clean`` dictionary in ``dry_run.json`` lists the
projects that ``tempest cleanup`` will loop through to delete child
objects, but the command will, by default, not delete the projects
- themselves. This may differ from the ``tenants`` list as you can clean
+ themselves. This may differ from the ``projects`` list as you can clean
the Tempest and alternate Tempest users and projects but they will not be
- deleted unless the **--delete-tempest-conf-objects** flag is used to
+ deleted unless the ``--delete-tempest-conf-objects`` flag is used to
force their deletion.
"""
@@ -111,13 +114,13 @@
self.admin_id = ""
self.admin_role_id = ""
- self.admin_tenant_id = ""
+ self.admin_project_id = ""
self._init_admin_ids()
self.admin_role_added = []
# available services
- self.tenant_services = cleanup_service.get_tenant_cleanup_services()
+ self.project_services = cleanup_service.get_project_cleanup_services()
self.global_services = cleanup_service.get_global_cleanup_services()
if parsed_args.init_saved_state:
@@ -133,24 +136,24 @@
is_save_state = False
if is_dry_run:
- self.dry_run_data["_tenants_to_clean"] = {}
+ self.dry_run_data["_projects_to_clean"] = {}
admin_mgr = self.admin_mgr
- # Always cleanup tempest and alt tempest tenants unless
+ # Always cleanup tempest and alt tempest projects unless
# they are in saved state json. Therefore is_preserve is False
kwargs = {'data': self.dry_run_data,
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': False,
'is_save_state': is_save_state}
- tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
- tenants = tenant_service.list()
- print("Process %s tenants" % len(tenants))
+ project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
+ projects = project_service.list()
+ print("Process %s projects" % len(projects))
- # Loop through list of tenants and clean them up.
- for tenant in tenants:
- self._add_admin(tenant['id'])
- self._clean_tenant(tenant)
+ # Loop through list of projects and clean them up.
+ for project in projects:
+ self._add_admin(project['id'])
+ self._clean_project(project)
kwargs = {'data': self.dry_run_data,
'is_dry_run': is_dry_run,
@@ -169,49 +172,51 @@
self._remove_admin_user_roles()
def _remove_admin_user_roles(self):
- tenant_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for tenants: %s",
- tenant_ids)
- for tenant_id in tenant_ids:
- self._remove_admin_role(tenant_id)
+ project_ids = self.admin_role_added
+ LOG.debug("Removing admin user roles where needed for projects: %s",
+ project_ids)
+ for project_id in project_ids:
+ self._remove_admin_role(project_id)
- def _clean_tenant(self, tenant):
- print("Cleaning tenant: %s " % tenant['name'])
+ def _clean_project(self, project):
+ print("Cleaning project: %s " % project['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
is_preserve = not self.options.delete_tempest_conf_objects
- tenant_id = tenant['id']
- tenant_name = tenant['name']
- tenant_data = None
+ project_id = project['id']
+ project_name = project['name']
+ project_data = None
if is_dry_run:
- tenant_data = dry_run_data["_tenants_to_clean"][tenant_id] = {}
- tenant_data['name'] = tenant_name
+ project_data = dry_run_data["_projects_to_clean"][project_id] = {}
+ project_data['name'] = project_name
kwargs = {"username": CONF.auth.admin_username,
"password": CONF.auth.admin_password,
- "tenant_name": tenant['name']}
+ "project_name": project['name']}
mgr = clients.Manager(credentials=credentials.get_credentials(
**kwargs))
- kwargs = {'data': tenant_data,
+ kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
'saved_state_json': None,
'is_preserve': is_preserve,
'is_save_state': False,
- 'tenant_id': tenant_id}
- for service in self.tenant_services:
+ 'project_id': project_id}
+ for service in self.project_services:
svc = service(mgr, **kwargs)
svc.run()
def _init_admin_ids(self):
- tn_cl = self.admin_mgr.tenants_client
- rl_cl = self.admin_mgr.roles_client
+ pr_cl = self.admin_mgr.projects_client
+ rl_cl = self.admin_mgr.roles_v3_client
+ rla_cl = self.admin_mgr.role_assignments_client
+ us_cl = self.admin_mgr.users_v3_client
- tenant = identity.get_tenant_by_name(tn_cl,
- CONF.auth.admin_project_name)
- self.admin_tenant_id = tenant['id']
-
- user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
- CONF.auth.admin_username)
+ project = identity.get_project_by_name(pr_cl,
+ CONF.auth.admin_project_name)
+ self.admin_project_id = project['id']
+ user = identity.get_user_by_project(us_cl, rla_cl,
+ self.admin_project_id,
+ CONF.auth.admin_username)
self.admin_id = user['id']
roles = rl_cl.list_roles()['roles']
@@ -236,7 +241,7 @@
dest='delete_tempest_conf_objects',
default=False,
help="Force deletion of the tempest and "
- "alternate tempest users and tenants.")
+ "alternate tempest users and projects.")
parser.add_argument('--dry-run', action="store_true",
dest='dry_run', default=False,
help="Generate JSON file:" + DRY_RUN_JSON +
@@ -247,44 +252,44 @@
def get_description(self):
return 'Cleanup after tempest run'
- def _add_admin(self, tenant_id):
- rl_cl = self.admin_mgr.roles_client
+ def _add_admin(self, project_id):
+ rl_cl = self.admin_mgr.roles_v3_client
needs_role = True
- roles = rl_cl.list_user_roles_on_project(tenant_id,
+ roles = rl_cl.list_user_roles_on_project(project_id,
self.admin_id)['roles']
for role in roles:
if role['id'] == self.admin_role_id:
needs_role = False
- LOG.debug("User already had admin privilege for this tenant")
+ LOG.debug("User already had admin privilege for this project")
if needs_role:
- LOG.debug("Adding admin privilege for : %s", tenant_id)
- rl_cl.create_user_role_on_project(tenant_id, self.admin_id,
+ LOG.debug("Adding admin privilege for : %s", project_id)
+ rl_cl.create_user_role_on_project(project_id, self.admin_id,
self.admin_role_id)
- self.admin_role_added.append(tenant_id)
+ self.admin_role_added.append(project_id)
- def _remove_admin_role(self, tenant_id):
- LOG.debug("Remove admin user role for tenant: %s", tenant_id)
+ def _remove_admin_role(self, project_id):
+        LOG.debug("Remove admin user role for project: %s", project_id)
# Must initialize Admin Manager for each user role
# Otherwise authentication exception is thrown, weird
id_cl = clients.Manager(
credentials.get_configured_admin_credentials()).identity_client
- if (self._tenant_exists(tenant_id)):
+ if (self._project_exists(project_id)):
try:
- id_cl.delete_role_from_user_on_project(tenant_id,
+ id_cl.delete_role_from_user_on_project(project_id,
self.admin_id,
self.admin_role_id)
except Exception as ex:
- LOG.exception("Failed removing role from tenant which still"
+                LOG.exception("Failed removing role from project which still "
"exists, exception: %s", ex)
- def _tenant_exists(self, tenant_id):
- tn_cl = self.admin_mgr.tenants_client
+ def _project_exists(self, project_id):
+ pr_cl = self.admin_mgr.projects_client
try:
- t = tn_cl.show_tenant(tenant_id)
- LOG.debug("Tenant is: %s", str(t))
+ p = pr_cl.show_project(project_id)
+ LOG.debug("Project is: %s", str(p))
return True
except Exception as ex:
- LOG.debug("Tenant no longer exists? %s", ex)
+ LOG.debug("Project no longer exists? %s", ex)
return False
def _init_state(self):
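
For illustration only, a minimal sketch (outside the patch) of reading the dry-run report after the rename above: ``tempest cleanup --dry-run`` writes ``dry_run.json``, whose ``_projects_to_clean`` dictionary maps each project id to the data gathered for it, including its ``name``. The printing loop below is purely illustrative.

import json

with open('dry_run.json') as report_file:
    report = json.load(report_file)

# Each entry maps a project id to the data collected by the cleanup services.
for project_id, data in report.get('_projects_to_clean', {}).items():
    print('would clean project %s (%s)' % (data.get('name'), project_id))
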
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index c75bc85..025959a 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -32,12 +32,11 @@
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
-CONF_TENANTS = None
+CONF_PROJECTS = None
CONF_USERS = None
IS_CINDER = None
IS_GLANCE = None
-IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
@@ -50,7 +49,7 @@
global CONF_PRIV_NETWORK_NAME
global CONF_PUB_NETWORK
global CONF_PUB_ROUTER
- global CONF_TENANTS
+ global CONF_PROJECTS
global CONF_USERS
global IS_CINDER
global IS_GLANCE
@@ -60,7 +59,6 @@
IS_CINDER = CONF.service_available.cinder
IS_GLANCE = CONF.service_available.glance
- IS_HEAT = CONF.service_available.heat
IS_NEUTRON = CONF.service_available.neutron
IS_NOVA = CONF.service_available.nova
@@ -69,7 +67,7 @@
CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
CONF_PUB_NETWORK = CONF.network.public_network_id
CONF_PUB_ROUTER = CONF.network.public_router_id
- CONF_TENANTS = [CONF.auth.admin_project_name]
+ CONF_PROJECTS = [CONF.auth.admin_project_name]
CONF_USERS = [CONF.auth.admin_username]
if IS_NEUTRON:
@@ -82,14 +80,14 @@
am = clients.Manager(
credentials.get_configured_admin_credentials())
net_cl = am.networks_client
- tn_cl = am.tenants_client
+ pr_cl = am.projects_client
networks = net_cl.list_networks()
- tenant = identity.get_tenant_by_name(tn_cl, project_name)
- t_id = tenant['id']
+ project = identity.get_project_by_name(pr_cl, project_name)
+ p_id = project['id']
n_id = None
for net in networks['networks']:
- if (net['tenant_id'] == t_id and net['name'] == net_name):
+ if (net['project_id'] == p_id and net['name'] == net_name):
n_id = net['id']
break
return n_id
@@ -141,7 +139,7 @@
def __init__(self, manager, **kwargs):
super(SnapshotService, self).__init__(kwargs)
- self.client = manager.snapshots_client
+ self.client = manager.snapshots_client_latest
def list(self):
client = self.client
@@ -212,33 +210,6 @@
self.data['server_groups'] = sgs
-class StackService(BaseService):
- def __init__(self, manager, **kwargs):
- super(StackService, self).__init__(kwargs)
- params = config.service_client_config('orchestration')
- self.client = manager.orchestration.OrchestrationClient(
- manager.auth_provider, **params)
-
- def list(self):
- client = self.client
- stacks = client.list_stacks()['stacks']
- LOG.debug("List count, %s Stacks", len(stacks))
- return stacks
-
- def delete(self):
- client = self.client
- stacks = self.list()
- for stack in stacks:
- try:
- client.delete_stack(stack['id'])
- except Exception:
- LOG.exception("Delete Stack exception.")
-
- def dry_run(self):
- stacks = self.list()
- self.data['stacks'] = stacks
-
-
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
super(KeyPairService, self).__init__(kwargs)
@@ -319,7 +290,7 @@
class VolumeService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeService, self).__init__(kwargs)
- self.client = manager.volumes_client
+ self.client = manager.volumes_client_latest
def list(self):
client = self.client
@@ -344,7 +315,7 @@
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeQuotaService, self).__init__(kwargs)
- self.client = manager.volume_quotas_client
+ self.client = manager.volume_quotas_v2_client
def delete(self):
client = self.client
@@ -786,14 +757,14 @@
class IdentityService(BaseService):
def __init__(self, manager, **kwargs):
super(IdentityService, self).__init__(kwargs)
- self.client = manager.identity_client
+ self.client = manager.identity_v3_client
class UserService(BaseService):
def __init__(self, manager, **kwargs):
super(UserService, self).__init__(kwargs)
- self.client = manager.users_client
+ self.client = manager.users_v3_client
def list(self):
users = self.client.list_users()['users']
@@ -872,43 +843,43 @@
self.data['roles'][role['id']] = role['name']
-class TenantService(BaseService):
+class ProjectService(BaseService):
def __init__(self, manager, **kwargs):
- super(TenantService, self).__init__(kwargs)
- self.client = manager.tenants_client
+ super(ProjectService, self).__init__(kwargs)
+ self.client = manager.projects_client
def list(self):
- tenants = self.client.list_tenants()['tenants']
+ projects = self.client.list_projects()['projects']
if not self.is_save_state:
- tenants = [tenant for tenant in tenants if (tenant['id']
- not in self.saved_state_json['tenants'].keys()
- and tenant['name'] != CONF.auth.admin_project_name)]
+ projects = [project for project in projects if (project['id']
+ not in self.saved_state_json['projects'].keys()
+ and project['name'] != CONF.auth.admin_project_name)]
if self.is_preserve:
- tenants = [tenant for tenant in tenants if tenant['name']
- not in CONF_TENANTS]
+ projects = [project for project in projects if project['name']
+ not in CONF_PROJECTS]
- LOG.debug("List count, %s Tenants after reconcile", len(tenants))
- return tenants
+ LOG.debug("List count, %s Projects after reconcile", len(projects))
+ return projects
def delete(self):
- tenants = self.list()
- for tenant in tenants:
+ projects = self.list()
+ for project in projects:
try:
- self.client.delete_tenant(tenant['id'])
+ self.client.delete_project(project['id'])
except Exception:
- LOG.exception("Delete Tenant exception.")
+ LOG.exception("Delete project exception.")
def dry_run(self):
- tenants = self.list()
- self.data['tenants'] = tenants
+ projects = self.list()
+ self.data['projects'] = projects
def save_state(self):
- tenants = self.list()
- self.data['tenants'] = {}
- for tenant in tenants:
- self.data['tenants'][tenant['id']] = tenant['name']
+ projects = self.list()
+ self.data['projects'] = {}
+ for project in projects:
+ self.data['projects'][project['id']] = project['name']
class DomainService(BaseService):
@@ -948,35 +919,33 @@
self.data['domains'][domain['id']] = domain['name']
-def get_tenant_cleanup_services():
- tenant_services = []
+def get_project_cleanup_services():
+ project_services = []
# TODO(gmann): Tempest should provide some plugin hook for cleanup
# script extension to plugin tests also.
if IS_NOVA:
- tenant_services.append(ServerService)
- tenant_services.append(KeyPairService)
- tenant_services.append(SecurityGroupService)
- tenant_services.append(ServerGroupService)
+ project_services.append(ServerService)
+ project_services.append(KeyPairService)
+ project_services.append(SecurityGroupService)
+ project_services.append(ServerGroupService)
if not IS_NEUTRON:
- tenant_services.append(FloatingIpService)
- tenant_services.append(NovaQuotaService)
- if IS_HEAT:
- tenant_services.append(StackService)
+ project_services.append(FloatingIpService)
+ project_services.append(NovaQuotaService)
if IS_NEUTRON:
- tenant_services.append(NetworkFloatingIpService)
+ project_services.append(NetworkFloatingIpService)
if utils.is_extension_enabled('metering', 'network'):
- tenant_services.append(NetworkMeteringLabelRuleService)
- tenant_services.append(NetworkMeteringLabelService)
- tenant_services.append(NetworkRouterService)
- tenant_services.append(NetworkPortService)
- tenant_services.append(NetworkSubnetService)
- tenant_services.append(NetworkService)
- tenant_services.append(NetworkSecGroupService)
+ project_services.append(NetworkMeteringLabelRuleService)
+ project_services.append(NetworkMeteringLabelService)
+ project_services.append(NetworkRouterService)
+ project_services.append(NetworkPortService)
+ project_services.append(NetworkSubnetService)
+ project_services.append(NetworkService)
+ project_services.append(NetworkSecGroupService)
if IS_CINDER:
- tenant_services.append(SnapshotService)
- tenant_services.append(VolumeService)
- tenant_services.append(VolumeQuotaService)
- return tenant_services
+ project_services.append(SnapshotService)
+ project_services.append(VolumeService)
+ project_services.append(VolumeQuotaService)
+ return project_services
def get_global_cleanup_services():
@@ -986,7 +955,7 @@
if IS_GLANCE:
global_services.append(ImageService)
global_services.append(UserService)
- global_services.append(TenantService)
+ global_services.append(ProjectService)
global_services.append(DomainService)
global_services.append(RoleService)
return global_services
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index f07f197..6435717 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -19,11 +19,11 @@
==============
Tempest run has several options:
- * **--regex/-r**: This is a selection regex like what testr uses. It will run
- any tests that match on re.match() with the regex
- * **--smoke/-s**: Run all the tests tagged as smoke
+* ``--regex, -r``: This is a selection regex like what testr uses. It will run
+ any tests that match on re.match() with the regex
+* ``--smoke, -s``: Run all the tests tagged as smoke
-There are also the **--blacklist-file** and **--whitelist-file** options that
+There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
@@ -44,21 +44,21 @@
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
-You can also use the **--list-tests** option in conjunction with selection
+You can also use the ``--list-tests`` option in conjunction with selection
arguments to list which tests will be run.
-You can also use the **--load-list** option that lets you pass a filepath to
+You can also use the ``--load-list`` option that lets you pass a filepath to
tempest run with the file format being in a non-regex format, similar to the
-tests generated by the **--list-tests** option. You can specify target tests
+tests generated by the ``--list-tests`` option. You can specify target tests
by removing unnecessary tests from a list file which is generated from
-**--list-tests** option.
+the ``--list-tests`` option.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
-If you want to adjust the number of workers use the **--concurrency** option
-and if you want to run tests serially use **--serial/-t**
+If you want to adjust the number of workers use the ``--concurrency`` option
+and if you want to run tests serially use ``--serial/-t``
Running with Workspaces
-----------------------
@@ -82,7 +82,7 @@
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
-output to STDOUT use the **--subunit** flag
+output to STDOUT use the ``--subunit`` flag
Combining Runs
==============
@@ -90,7 +90,7 @@
There are certain situations in which you want to split a single run of tempest
across 2 executions of tempest run. (for example to run part of the tests
serially and others in parallel) To accomplish this but still treat the results
-as a single run you can leverage the **--combine** option which will append
+as a single run you can leverage the ``--combine`` option which will append
the current run's results with the previous runs.
"""
@@ -149,7 +149,7 @@
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
- testr_conf_file.write(file_contents)
+ testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index f9ebe20..f0ade7e 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -21,17 +21,14 @@
Runtime Arguments
-----------------
-**--subunit, -s**: (Optional) The path to the subunit file being parsed,
-defaults to stdin
-
-**--non-subunit-name, -n**: (Optional) The file_name that the logs are being
-stored in
-
-**--output-file, -o**: (Optional) The path where the JSON output will be
-written to. This contains more information than is present in stdout.
-
-**--ports, -p**: (Optional) The path to a JSON file describing the ports being
-used by different services
+* ``--subunit, -s``: (Optional) The path to the subunit file being parsed,
+ defaults to stdin
+* ``--non-subunit-name, -n``: (Optional) The file_name that the logs are being
+ stored in
+* ``--output-file, -o``: (Optional) The path where the JSON output will be
+ written to. This contains more information than is present in stdout.
+* ``--ports, -p``: (Optional) The path to a JSON file describing the ports
+ being used by different services
Usage
-----
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index a72493d..15af271 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -76,7 +76,6 @@
from tempest import config
import tempest.lib.common.http
from tempest.lib import exceptions as lib_exc
-from tempest.services import object_storage
CONF = config.CONF
@@ -197,10 +196,6 @@
def verify_keystone_api_versions(os, update):
# Check keystone api versions
versions = _get_api_versions(os, 'keystone')
- if (CONF.identity_feature_enabled.api_v2 !=
- contains_version('v2.', versions)):
- print_and_or_update('api_v2', 'identity-feature-enabled',
- not CONF.identity_feature_enabled.api_v2, update)
if (CONF.identity_feature_enabled.api_v3 !=
contains_version('v3.', versions)):
print_and_or_update('api_v3', 'identity-feature-enabled',
@@ -236,11 +231,10 @@
def get_extension_client(os, service):
- params = config.service_client_config('object-storage')
extensions_client = {
'nova': os.compute.ExtensionsClient(),
'neutron': os.network.ExtensionsClient(),
- 'swift': object_storage.CapabilitiesClient(os.auth_provider, **params),
+ 'swift': os.object_storage.CapabilitiesClient(),
# NOTE: Cinder v3 API is current and v2 and v1 are deprecated.
# V3 extension API is the same as v2, so we reuse the v2 client
# for v3 API also.
@@ -355,7 +349,6 @@
'image': 'glance',
'object_storage': 'swift',
'compute': 'nova',
- 'orchestration': 'heat',
'baremetal': 'ironic',
'identity': 'keystone',
}
diff --git a/tempest/cmd/workspace.py b/tempest/cmd/workspace.py
index 8166b4f..929a584 100644
--- a/tempest/cmd/workspace.py
+++ b/tempest/cmd/workspace.py
@@ -26,28 +26,28 @@
register
--------
-Registers a new tempest workspace via a given --name and --path
+Registers a new tempest workspace via a given ``--name`` and ``--path``
rename
------
-Renames a tempest workspace from --old-name to --new-name
+Renames a tempest workspace from ``--old-name`` to ``--new-name``
move
----
-Changes the path of a given tempest workspace --name to --path
+Changes the path of a given tempest workspace ``--name`` to ``--path``
remove
------
-Deletes the entry for a given tempest workspace --name
+Deletes the entry for a given tempest workspace ``--name``
---rmdir Deletes the given tempest workspace directory
+``--rmdir`` Deletes the given tempest workspace directory
General Options
===============
- **--workspace_path**: Allows the user to specify a different location for the
- workspace.yaml file containing the workspace definitions
- instead of ~/.tempest/workspace.yaml
+* ``--workspace_path``: Allows the user to specify a different location for the
+ workspace.yaml file containing the workspace definitions instead of
+ ``~/.tempest/workspace.yaml``
"""
import os
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 86fe3f5..638ad9b 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -229,7 +229,7 @@
clients.servers_client, server['id'], wait_until)
# Multiple validatable servers are not supported for now. Their
- # creation will fail with the condition above (l.58).
+ # creation will fail with the condition above.
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
_setup_validation_fip()
@@ -289,13 +289,21 @@
def create_websocket(url):
url = urlparse.urlparse(url)
- if url.scheme == 'https':
- client_socket = ssl.wrap_socket(socket.socket(socket.AF_INET,
- socket.SOCK_STREAM))
+ for res in socket.getaddrinfo(url.hostname, url.port,
+ socket.AF_UNSPEC, socket.SOCK_STREAM):
+ af, socktype, proto, _, sa = res
+ client_socket = socket.socket(af, socktype, proto)
+ if url.scheme == 'https':
+ client_socket = ssl.wrap_socket(client_socket)
+ client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ client_socket.connect(sa)
+ except socket.error:
+ client_socket.close()
+ continue
+ break
else:
- client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- client_socket.connect((url.hostname, url.port))
+ raise socket.error('WebSocket creation failed')
# Turn the Socket into a WebSocket to do the communication
return _WebSocket(client_socket, url)
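
An illustrative, stand-alone sketch of the dual-stack connect pattern used by the new ``create_websocket()`` above: every ``socket.getaddrinfo()`` result is tried in turn so IPv6-only and IPv4-only endpoints both work. The helper name and the commented-out target are hypothetical.

import socket

def connect_any(hostname, port):
    # Try every address record returned for the host; the first successful
    # connect wins, otherwise fall through to the error below.
    for af, socktype, proto, _, sa in socket.getaddrinfo(
            hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        sock = socket.socket(af, socktype, proto)
        try:
            sock.connect(sa)
        except socket.error:
            sock.close()
            continue
        return sock
    raise socket.error('unable to connect to %s:%s' % (hostname, port))

# connect_any('example.com', 80)  # hypothetical target
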
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index a340531..75db155 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -86,7 +86,7 @@
('public_network_id', CONF.network.public_network_id),
('create_networks', (CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network)),
- ('resource_prefix', CONF.resources_prefix),
+ ('resource_prefix', 'tempest'),
('identity_admin_endpoint_type', endpoint_type)
]))
@@ -219,13 +219,6 @@
'alt_user': ('identity', 'alt')
}
-DEFAULT_PARAMS = {
- 'disable_ssl_certificate_validation':
- CONF.identity.disable_ssl_certificate_validation,
- 'ca_certs': CONF.identity.ca_certificates_file,
- 'trace_requests': CONF.debug.trace_requests
-}
-
def get_configured_admin_credentials(fill_in=True, identity_version=None):
"""Get admin credentials from the config file
@@ -252,7 +245,7 @@
if identity_version == 'v3':
conf_attributes.append('domain_name')
# Read the parts of credentials from config
- params = DEFAULT_PARAMS.copy()
+ params = config.service_client_config()
for attr in conf_attributes:
params[attr] = getattr(CONF.auth, 'admin_' + attr)
# Build and validate credentials. We are reading configured credentials,
@@ -282,7 +275,7 @@
:param kwargs: Attributes to be used to build the Credentials object.
:returns: An object of a sub-type of `auth.Credentials`
"""
- params = dict(DEFAULT_PARAMS, **kwargs)
+ params = dict(config.service_client_config(), **kwargs)
identity_version = identity_version or CONF.identity.auth_version
# In case of "v3" add the domain from config if not specified
# To honour the "default_credentials_domain_name", if not domain
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index 6e496d3..eaf651b 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -20,6 +20,15 @@
CONF = config.CONF
+def get_project_by_name(client, project_name):
+ projects = client.list_projects({'name': project_name})['projects']
+ for project in projects:
+ if project['name'] == project_name:
+ return project
+ raise lib_exc.NotFound('No such project(%s) in %s' % (project_name,
+ projects))
+
+
def get_tenant_by_name(client, tenant_name):
tenants = client.list_tenants()['tenants']
for tenant in tenants:
@@ -36,6 +45,18 @@
raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
+def get_user_by_project(users_client, roles_client, project_id, username):
+ users = users_client.list_users(**{'name': username})['users']
+ users_in_project = roles_client.list_role_assignments(
+ **{'scope.project.id': project_id})['role_assignments']
+ for user in users:
+ if user['name'] == username:
+ for u in users_in_project:
+ if u['user']['id'] == user['id']:
+ return user
+ raise lib_exc.NotFound('No such user(%s) in %s' % (username, users))
+
+
def identity_utils(clients):
"""A client that abstracts v2 and v3 identity operations.
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 5a86caa..225a713 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -31,10 +31,9 @@
if attr == 'rand_name':
# NOTE(flwang): This is a proxy to generate a random name that
- # includes a random number and a prefix if one is configured in
- # CONF.resources_prefix
+ # includes a random number and a prefix 'tempest'
attr_obj = partial(lib_data_utils.rand_name,
- prefix=CONF.resources_prefix)
+ prefix='tempest')
else:
attr_obj = getattr(lib_data_utils, attr)
@@ -78,7 +77,7 @@
decorators.attr(type=list(args))(f)
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
service_list = get_service_list()
for service in args:
@@ -86,7 +85,7 @@
msg = 'Skipped because the %s service is not available' % (
service)
raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
return wrapper
return decorator
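
A small sketch of the behaviour pinned above: the ``rand_name`` proxy now always passes ``prefix='tempest'`` to tempest.lib, so generated resource names carry a fixed ``tempest-`` prefix. The sample resource name is arbitrary.

from functools import partial

from tempest.lib.common.utils import data_utils as lib_data_utils

rand_name = partial(lib_data_utils.rand_name, prefix='tempest')
print(rand_name('server'))  # e.g. tempest-server-1317069431
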
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 10afee0..08e2a12 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,15 +179,13 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, statuses):
- """Waits for a volume resource to reach any of the specified statuses.
+def wait_for_volume_resource_status(client, resource_id, status):
+ """Waits for a volume resource to reach a given status.
This function is a common function for volume, snapshot and backup
resources. The function extracts the name of the desired resource from
the client class name of the resource.
"""
- if not isinstance(statuses, list):
- statuses = [statuses]
resource_name = re.findall(
r'(volume|group-snapshot|snapshot|backup|group)',
client.resource_type)[-1].replace('-', '_')
@@ -195,11 +193,11 @@
resource_status = show_resource(resource_id)[resource_name]['status']
start = int(time.time())
- while resource_status not in statuses:
+ while resource_status != status:
time.sleep(client.build_interval)
resource_status = show_resource(resource_id)[
'{}'.format(resource_name)]['status']
- if resource_status == 'error' and resource_status not in statuses:
+ if resource_status == 'error' and resource_status != status:
raise exceptions.VolumeResourceBuildErrorException(
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -208,11 +206,11 @@
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
- (resource_name, resource_id, statuses, resource_status,
+ (resource_name, resource_id, status, resource_status,
client.build_timeout))
raise lib_exc.TimeoutException(message)
LOG.info('%s %s reached %s after waiting for %f seconds',
- resource_name, resource_id, statuses, time.time() - start)
+ resource_name, resource_id, status, time.time() - start)
def wait_for_volume_retype(client, volume_id, new_volume_type):
diff --git a/tempest/config.py b/tempest/config.py
index 4d0839a..340a27e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -65,9 +65,7 @@
deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
group='auth'),
cfg.DeprecatedOpt('allow_tenant_isolation',
- group='compute'),
- cfg.DeprecatedOpt('allow_tenant_isolation',
- group='orchestration')]),
+ group='compute')]),
cfg.ListOpt('tempest_roles',
help="Roles to assign to all users created by tempest",
default=[]),
@@ -106,6 +104,7 @@
secret=True,
deprecated_group='identity'),
cfg.StrOpt('admin_domain_name',
+ default='Default',
help="Admin domain name for authentication (Keystone V3)."
"The same domain applies to user and project",
deprecated_group='identity'),
@@ -194,6 +193,8 @@
default=60,
help='Timeout in seconds to wait for the http request to '
'return'),
+ cfg.StrOpt('proxy_url',
+ help='Specify an http proxy to use.')
]
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
@@ -205,8 +206,14 @@
help='Does the identity service have delegation and '
'impersonation enabled'),
cfg.BoolOpt('api_v2',
- default=True,
- help='Is the v2 identity API enabled'),
+ default=False,
+ help='Is the v2 identity API enabled',
+ deprecated_for_removal=True,
+ deprecated_reason='The identity v2.0 API was removed in the '
+ 'Queens release. Tests that exercise the '
+ 'v2.0 API will be removed from tempest in '
+ 'the v22.0.0 release. They are kept only to '
+ 'test stable branches.'),
cfg.BoolOpt('api_v2_admin',
default=True,
help="Is the v2 identity admin API available? This setting "
@@ -219,18 +226,8 @@
help="A list of enabled identity extensions with a special "
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
- "To get the list of extensions run: 'keystone discover'"),
- # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed
- # in Newton and Ocata. This option can be removed after Mitaka is end of
- # life.
- cfg.BoolOpt('forbid_global_implied_dsr',
- default=False,
- help='Does the environment forbid global roles implying '
- 'domain specific ones?',
- deprecated_for_removal=True,
- deprecated_reason="This feature flag was introduced to "
- "support testing of old OpenStack versions, "
- "which are not supported anymore"),
+ "To get the list of extensions run: "
+ "'openstack extension list --identity'"),
cfg.BoolOpt('domain_specific_drivers',
default=False,
help='Are domain specific drivers enabled? '
@@ -303,9 +300,9 @@
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
- 'when shelved. This time should be the same as the time '
- 'of nova.conf, and some tests will run for as long as the '
- 'time.'),
+                    'when shelved. This configuration value should be the same '
+                    'as [nova.DEFAULT]->shelved_offload_time in nova.conf, and '
+ 'some tests will run for as long as the time.'),
cfg.IntOpt('min_compute_nodes',
default=1,
help=('The minimum number of compute nodes expected. This will '
@@ -475,6 +472,15 @@
default=False,
help='Does the test environment support in-place swapping of '
'volumes attached to a server instance?'),
+ cfg.BoolOpt('volume_backed_live_migration',
+ default=False,
+ help='Does the test environment support volume-backed live '
+ 'migration?'),
+ cfg.BoolOpt('volume_multiattach',
+ default=False,
+ help='Does the test environment support attaching a volume to '
+ 'more than one instance? This depends on hypervisor and '
+ 'volume backend/type and compute API version 2.60.'),
]
@@ -539,13 +545,6 @@
'are current one. In future, Tempest will '
'test v2 APIs only so this config option '
'will be removed.'),
- cfg.BoolOpt('deactivate_image',
- default=False,
- help="Is the deactivate-image feature enabled."
- " The feature has been integrated since Kilo.",
- deprecated_for_removal=True,
- deprecated_reason="All supported versions of OpenStack now "
- "support the 'deactivate_image' feature"),
]
network_group = cfg.OptGroup(name='network',
@@ -836,7 +835,14 @@
help="Is the v2 volume API enabled"),
cfg.BoolOpt('api_v3',
default=True,
- help="Is the v3 volume API enabled")
+ help="Is the v3 volume API enabled"),
+ cfg.BoolOpt('extend_attached_volume',
+ default=False,
+ help='Does the cloud support extending the size of a volume '
+ 'which is currently attached to a server instance? This '
+ 'depends on the 3.42 volume API microversion and the '
+ '2.51 compute API microversion. Also, not all volume or '
+ 'compute backends support this operation.')
]
@@ -907,66 +913,6 @@
help="Execute discoverability tests"),
]
-orchestration_group = cfg.OptGroup(name='orchestration',
- title='Orchestration Service Options')
-
-OrchestrationGroup = [
- cfg.StrOpt('catalog_type',
- default='orchestration',
- help="Catalog type of the Orchestration service.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('region',
- default='',
- help="The orchestration region name to use. If empty, the "
- "value of identity.region is used instead. If no such "
- "region is found in the service catalog, the first found "
- "one is used.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('endpoint_type',
- default='publicURL',
- choices=['public', 'admin', 'internal',
- 'publicURL', 'adminURL', 'internalURL'],
- help="The endpoint type to use for the orchestration service.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('stack_owner_role', default='heat_stack_owner',
- help='Role required for users to be able to manage stacks',
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('build_interval',
- default=1,
- help="Time in seconds between build status checks.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('build_timeout',
- default=1200,
- help="Timeout in seconds to wait for a stack to build.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('instance_type',
- default='m1.micro',
- help="Instance type for tests. Needs to be big enough for a "
- "full OS plus the test workload",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('keypair_name',
- help="Name of existing keypair to launch servers with.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('max_template_size',
- default=524288,
- help="Value must match heat configuration of the same name.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('max_resources_per_stack',
- default=1000,
- help="Value must match heat configuration of the same name.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
-]
-
scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
@@ -1028,11 +974,6 @@
cfg.BoolOpt('nova',
default=True,
help="Whether or not nova is expected to be available"),
- cfg.BoolOpt('heat',
- default=False,
- help="Whether or not Heat is expected to be available",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
]
debug_group = cfg.OptGroup(name="debug",
@@ -1062,17 +1003,6 @@
]
DefaultGroup = [
- cfg.StrOpt('resources_prefix',
- default='tempest',
- help="Prefix to be added when generating the name for "
- "test resources. It can be used to discover all "
- "resources associated with a specific test run when "
- "running tempest on a real-life cloud",
- deprecated_for_removal=True,
- deprecated_reason="It is enough to add 'tempest' as this "
- "prefix to ideintify resources which are "
- "created by Tempest and no projects set "
- "this option on OpenStack dev community."),
cfg.BoolOpt('pause_teardown',
default=False,
help="""Whether to pause a test in global teardown.
@@ -1100,7 +1030,6 @@
(volume_feature_group, VolumeFeaturesGroup),
(object_storage_group, ObjectStoreGroup),
(object_storage_feature_group, ObjectStoreFeaturesGroup),
- (orchestration_group, OrchestrationGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
@@ -1167,7 +1096,6 @@
self.object_storage = _CONF['object-storage']
self.object_storage_feature_enabled = _CONF[
'object-storage-feature-enabled']
- self.orchestration = _CONF.orchestration
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
self.debug = _CONF.debug
@@ -1301,6 +1229,7 @@
* `ca_certs`
* `trace_requests`
* `http_timeout`
+ * `proxy_url`
The dict returned by this does not fit a few service clients:
@@ -1323,7 +1252,8 @@
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests,
- 'http_timeout': CONF.service_clients.http_timeout
+ 'http_timeout': CONF.service_clients.http_timeout,
+ 'proxy_url': CONF.service_clients.proxy_url,
}
if service_client_name is None:
@@ -1377,7 +1307,7 @@
module = service_clients[service_client]
configs = service_client.split('.')[0]
service_client_data = dict(
- name=service_client.replace('.', '_'),
+ name=service_client.replace('.', '_').replace('-', '_'),
service_version=service_client,
module_path=module.__name__,
client_names=module.__all__,
diff --git a/tempest/tests/services/__init__.py b/tempest/lib/api_schema/response/compute/v2_45/__init__.py
similarity index 100%
rename from tempest/tests/services/__init__.py
rename to tempest/lib/api_schema/response/compute/v2_45/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_45/images.py b/tempest/lib/api_schema/response/compute/v2_45/images.py
new file mode 100644
index 0000000..8a48f36
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_45/images.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# The 2.45 microversion removes the "location" header and adds "image_id"
+# to the response body.
+create_image = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'image_id': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['image_id']
+ }
+}
+
+# NOTE(mriedem): The compute proxy APIs for showing/listing and deleting
+# images were deprecated in microversion 2.35, and the compute proxy APIs for
+# working with image metadata were deprecated in microversion 2.39. Therefore,
+# client-side code shouldn't rely on those APIs in the compute images client
+# past those microversions and should instead use the Glance images client
+# directly.
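
A hedged sketch of exercising the new 2.45 schema directly with the ``jsonschema`` library; the sample response body and its image id are made up.

import jsonschema

from tempest.lib.api_schema.response.compute.v2_45 import images

# A 2.45 createImage response carries only an image_id in the body.
sample_body = {'image_id': '70a599e0-31e7-49b7-b260-868f441e862b'}  # fake id
jsonschema.validate(sample_body, images.create_image['response_body'])
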
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index ab4308f..2dd9d00 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -261,12 +261,13 @@
def __init__(self, credentials, auth_url,
disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None, scope='project',
- http_timeout=None):
+ http_timeout=None, proxy_url=None):
super(KeystoneAuthProvider, self).__init__(credentials, scope)
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
self.trace_requests = trace_requests
self.http_timeout = http_timeout
+ self.proxy_url = proxy_url
self.auth_url = auth_url
self.auth_client = self._auth_client(auth_url)
@@ -345,7 +346,7 @@
return json_v2id.TokenClient(
auth_url, disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs, trace_requests=self.trace_requests,
- http_timeout=self.http_timeout)
+ http_timeout=self.http_timeout, proxy_url=self.proxy_url)
def _auth_params(self):
"""Auth parameters to be passed to the token request
@@ -433,7 +434,7 @@
return json_v3id.V3TokenClient(
auth_url, disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs, trace_requests=self.trace_requests,
- http_timeout=self.http_timeout)
+ http_timeout=self.http_timeout, proxy_url=self.proxy_url)
def _auth_params(self):
"""Auth parameters to be passed to the token request
@@ -599,7 +600,8 @@
def get_credentials(auth_url, fill_in=True, identity_version='v2',
disable_ssl_certificate_validation=None, ca_certs=None,
- trace_requests=None, http_timeout=None, **kwargs):
+ trace_requests=None, http_timeout=None, proxy_url=None,
+ **kwargs):
"""Builds a credentials object based on the configured auth_version
:param auth_url (string): Full URI of the OpenStack Identity API(Keystone)
@@ -617,6 +619,7 @@
:param trace_requests: trace in log API requests to the auth system
:param http_timeout: timeout in seconds to wait for the http request to
return
+ :param proxy_url: URL of HTTP(s) proxy used when fill_in is True
:param kwargs (dict): Dict of credential key/value pairs
Examples:
@@ -641,7 +644,7 @@
auth_provider = auth_provider_class(
creds, auth_url, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests,
- http_timeout=http_timeout)
+ http_timeout=http_timeout, proxy_url=proxy_url)
creds = auth_provider.fill_credentials()
return creds
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index 5468a7b..3fb56ec 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -58,8 +58,6 @@
if six.PY2:
cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
- result = ''
- result_err = ''
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
@@ -93,10 +91,23 @@
:type insecure: boolean
:param prefix: prefix to insert before commands
:type prefix: string
+ :param user_domain_name: User's domain name
+ :type user_domain_name: string
+ :param user_domain_id: User's domain ID
+ :type user_domain_id: string
+ :param project_domain_name: Project's domain name
+ :type project_domain_name: string
+ :param project_domain_id: Project's domain ID
+ :type project_domain_id: string
+ :param identity_api_version: Version of the Identity API
+ :type identity_api_version: string
"""
def __init__(self, username='', password='', tenant_name='', uri='',
- cli_dir='', insecure=False, prefix='', *args, **kwargs):
+ cli_dir='', insecure=False, prefix='', user_domain_name=None,
+ user_domain_id=None, project_domain_name=None,
+ project_domain_id=None, identity_api_version=None, *args,
+ **kwargs):
"""Initialize a new CLIClient object."""
super(CLIClient, self).__init__()
self.cli_dir = cli_dir if cli_dir else '/usr/bin'
@@ -106,6 +117,11 @@
self.uri = uri
self.insecure = insecure
self.prefix = prefix
+ self.user_domain_name = user_domain_name
+ self.user_domain_id = user_domain_id
+ self.project_domain_name = project_domain_name
+ self.project_domain_id = project_domain_id
+ self.identity_api_version = identity_api_version
def nova(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
@@ -360,12 +376,23 @@
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
- creds = ('--os-username %s --os-tenant-name %s --os-password %s '
+ creds = ('--os-username %s --os-project-name %s --os-password %s '
'--os-auth-url %s' %
(self.username,
self.tenant_name,
self.password,
self.uri))
+ if self.identity_api_version:
+ creds += ' --os-identity-api-version %s' % (
+ self.identity_api_version)
+ if self.user_domain_name is not None:
+ creds += ' --os-user-domain-name %s' % self.user_domain_name
+ if self.user_domain_id is not None:
+ creds += ' --os-user-domain-id %s' % self.user_domain_id
+ if self.project_domain_name is not None:
+ creds += ' --os-project-domain-name %s' % self.project_domain_name
+ if self.project_domain_id is not None:
+ creds += ' --os-project-domain-id %s' % self.project_domain_id
if self.insecure:
flags = creds + ' --insecure ' + flags
else:
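
A hedged sketch of constructing a ``CLIClient`` with the Identity v3 parameters added above; the endpoint URL and credential values are placeholders, and only object construction is shown.

from tempest.lib.cli import base

cli = base.CLIClient(
    username='demo', password='secretadmin', tenant_name='demo',
    uri='http://203.0.113.10/identity/v3',  # placeholder endpoint
    identity_api_version='3',
    user_domain_name='Default', project_domain_name='Default')
print(cli.identity_api_version, cli.project_domain_name)
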
diff --git a/tempest/lib/common/fixed_network.py b/tempest/lib/common/fixed_network.py
index e2054a4..875a79d 100644
--- a/tempest/lib/common/fixed_network.py
+++ b/tempest/lib/common/fixed_network.py
@@ -38,7 +38,12 @@
raise exceptions.InvalidTestResource(type='network', name=name)
networks = compute_networks_client.list_networks()['networks']
- networks = [n for n in networks if n['label'] == name]
+ # NOTE(zhufl) compute networks_client uses 'label' as network name field,
+ # while neutron networks_client uses 'name' as network name field.
+ try:
+ networks = [n for n in networks if n['label'] == name]
+ except KeyError:
+ networks = [n for n in networks if n['name'] == name]
# Check that a network exists, else raise an InvalidConfigurationException
if len(networks) == 1:
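
A stand-alone illustration of the fallback above, showing why the ``KeyError`` catch is needed: compute-style network records carry the name under ``label`` while neutron-style records use ``name``. The helper and the sample records are made up.

def filter_networks_by_name(networks, name):
    try:
        return [n for n in networks if n['label'] == name]
    except KeyError:
        return [n for n in networks if n['name'] == name]

print(filter_networks_by_name([{'label': 'private'}], 'private'))  # nova-net style
print(filter_networks_by_name([{'name': 'private'}], 'private'))   # neutron style
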
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index b4b1fc9..738c37f 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -17,6 +17,47 @@
import urllib3
+class ClosingProxyHttp(urllib3.ProxyManager):
+ def __init__(self, proxy_url, disable_ssl_certificate_validation=False,
+ ca_certs=None, timeout=None):
+ kwargs = {}
+
+ if disable_ssl_certificate_validation:
+ urllib3.disable_warnings()
+ kwargs['cert_reqs'] = 'CERT_NONE'
+ elif ca_certs:
+ kwargs['cert_reqs'] = 'CERT_REQUIRED'
+ kwargs['ca_certs'] = ca_certs
+
+ if timeout:
+ kwargs['timeout'] = timeout
+
+ super(ClosingProxyHttp, self).__init__(proxy_url, **kwargs)
+
+ def request(self, url, method, *args, **kwargs):
+
+ class Response(dict):
+ def __init__(self, info):
+ for key, value in info.getheaders().items():
+ self[key.lower()] = value
+ self.status = info.status
+ self['status'] = str(self.status)
+ self.reason = info.reason
+ self.version = info.version
+ self['content-location'] = url
+
+ original_headers = kwargs.get('headers', {})
+ new_headers = dict(original_headers, connection='close')
+ new_kwargs = dict(kwargs, headers=new_headers)
+
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ r = super(ClosingProxyHttp, self).request(method, url, retries=retry,
+ *args, **new_kwargs)
+ return Response(r), r.data
+
+
class ClosingHttp(urllib3.poolmanager.PoolManager):
def __init__(self, disable_ssl_certificate_validation=False,
ca_certs=None, timeout=None):
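
A minimal usage sketch for the new ``ClosingProxyHttp`` class, assuming an HTTP proxy is reachable at the placeholder URL below; note that the ``request()`` override takes the URL before the method and returns a ``(response, body)`` pair.

from tempest.lib.common import http

proxy_client = http.ClosingProxyHttp('http://203.0.113.20:3128',  # placeholder
                                     timeout=10)
resp, body = proxy_client.request('http://example.com/', 'GET')
print(resp.status, len(body))
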
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 83db513..fcdeb17 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -344,11 +344,11 @@
net_creds = cred_provider.TestResources(credential)
net_clients = clients.ServiceClients(credentials=credential,
identity_uri=self.identity_uri)
- compute_network_client = net_clients.compute.NetworksClient()
+ networks_client = net_clients.network.NetworksClient()
net_name = self.hash_dict['networks'].get(hash, None)
try:
network = fixed_network.get_network_from_name(
- net_name, compute_network_client)
+ net_name, networks_client)
except lib_exc.InvalidTestResource:
network = {}
net_creds.set_resources(network=network)
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index f58d737..22276d4 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -69,6 +69,7 @@
of the request and response payload
:param str http_timeout: Timeout in seconds to wait for the http request to
return
+ :param str proxy_url: http proxy url to use.
"""
# The version of the API this client implements
@@ -80,7 +81,8 @@
endpoint_type='publicURL',
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
- trace_requests='', name=None, http_timeout=None):
+ trace_requests='', name=None, http_timeout=None,
+ proxy_url=None):
self.auth_provider = auth_provider
self.service = service
self.region = region
@@ -100,9 +102,16 @@
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = disable_ssl_certificate_validation
- self.http_obj = http.ClosingHttp(
- disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+
+ if proxy_url:
+ self.http_obj = http.ClosingProxyHttp(
+ proxy_url,
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ timeout=http_timeout)
+ else:
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ timeout=http_timeout)
def get_headers(self, accept_type=None, send_type=None):
"""Return the default headers which will be used with outgoing requests
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index a0941ef..c5df590 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -18,9 +18,6 @@
import string
import uuid
-from debtcollector import removals
-import netaddr
-from oslo_utils import netutils
from oslo_utils import uuidutils
import six.moves
@@ -177,36 +174,6 @@
for i in range(size)])
-@removals.remove(
- message="use get_ipv6_addr_by_EUI64 from oslo_utils.netutils",
- version="Newton",
- removal_version="Ocata")
-def get_ipv6_addr_by_EUI64(cidr, mac):
- """Generate a IPv6 addr by EUI-64 with CIDR and MAC
-
- :param str cidr: a IPv6 CIDR
- :param str mac: a MAC address
- :return: an IPv6 Address
- :rtype: netaddr.IPAddress
- """
- # Check if the prefix is IPv4 address
- is_ipv4 = netutils.is_valid_ipv4(cidr)
- if is_ipv4:
- msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
- raise TypeError(msg)
- try:
- eui64 = int(netaddr.EUI(mac).eui64())
- prefix = netaddr.IPNetwork(cidr)
- return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
- except (ValueError, netaddr.AddrFormatError):
- raise TypeError('Bad prefix or mac format for generating IPv6 '
- 'address by EUI-64: %(prefix)s, %(mac)s:'
- % {'prefix': cidr, 'mac': mac})
- except TypeError:
- raise TypeError('Bad prefix type for generate IPv6 address by '
- 'EUI-64: %s' % cidr)
-
-
# Courtesy of http://stackoverflow.com/a/312464
def chunkify(sequence, chunksize):
"""Yield successive chunks from `sequence`."""
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index cd4092b..94fab00 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
import sys
import netaddr
@@ -25,13 +26,14 @@
def debug_ssh(function):
"""Decorator to generate extra debug info in case off SSH failure"""
+ @functools.wraps(function)
def wrapper(self, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except Exception as e:
caller = test_utils.find_test_caller() or "not found"
if not isinstance(e, tempest.lib.exceptions.SSHTimeout):
- message = ('Initializing SSH connection to %(ip)s failed. '
+ message = ('Executing command on %(ip)s failed. '
'Error: %(error)s' % {'ip': self.ip_address,
'error': e})
message = '(%s) %s' % (caller, message)
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index bd0db7c..2a9f3a9 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -86,22 +86,29 @@
pass
-def call_until_true(func, duration, sleep_for):
+def call_until_true(func, duration, sleep_for, *args, **kwargs):
"""Call the given function until it returns True (and return True)
or until the specified duration (in seconds) elapses (and return False).
- :param func: A zero argument callable that returns True on success.
+ :param func: A callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
+ :param args: args that are passed to func.
+ :param kwargs: kwargs that are passed to func.
"""
now = time.time()
+ begin_time = now
timeout = now + duration
+ func_name = getattr(func, '__name__', getattr(func.__class__, '__name__'))
while now < timeout:
- if func():
+ if func(*args, **kwargs):
+ LOG.debug("Call %s returns true in %f seconds",
+ func_name, time.time() - begin_time)
return True
time.sleep(sleep_for)
now = time.time()
+ LOG.debug("Call %s returns false in %f seconds", func_name, duration)
return False
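
A hedged sketch of the extended ``call_until_true`` signature: positional and keyword arguments after ``sleep_for`` are now forwarded to the predicate, so callers no longer need a zero-argument lambda. The predicate below is purely illustrative.

import time

from tempest.lib.common.utils import test_utils

def past_deadline(deadline):
    return time.time() > deadline

# Previously this required: call_until_true(lambda: past_deadline(t), 10, 1)
print(test_utils.call_until_true(past_deadline, 10, 1, time.time() + 2))
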
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index f82f707..e99dd24 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -15,7 +15,6 @@
import functools
import uuid
-import debtcollector.removals
from oslo_log import log as logging
import six
import testtools
@@ -31,7 +30,7 @@
"""
def decorator(f):
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
skip = False
if "condition" in kwargs:
if kwargs["condition"] is True:
@@ -43,7 +42,7 @@
raise ValueError('bug must be a valid bug number')
msg = "Skipped until Bug: %s is resolved." % kwargs["bug"]
raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
return wrapper
return decorator
@@ -56,9 +55,9 @@
"""
def decorator(f):
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
try:
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
except Exception as exc:
exc_status_code = getattr(exc, 'status_code', None)
if status_code is None or status_code == exc_status_code:
@@ -87,25 +86,6 @@
return decorator
-@debtcollector.removals.remove(removal_version='Queen')
-class skip_unless_attr(object):
- """Decorator to skip tests if a specified attr does not exists or False"""
- def __init__(self, attr, msg=None):
- self.attr = attr
- self.message = msg or ("Test case attribute %s not found "
- "or False") % attr
-
- def __call__(self, func):
- @functools.wraps(func)
- def _skipper(*args, **kw):
- """Wrapped skipper function."""
- testobj = args[0]
- if not getattr(testobj, self.attr, False):
- raise testtools.TestCase.skipException(self.message)
- func(*args, **kw)
- return _skipper
-
-
def attr(**kwargs):
"""A decorator which applies the testtools attr decorator
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 9b2e87e..13af890 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -96,7 +96,7 @@
class Conflict(ClientRestClientException):
status_code = 409
- message = "An object with that identifier already exists"
+ message = "Conflict with state of target resource"
class Gone(ClientRestClientException):
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 4fa7a7a..8918a8c 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -31,6 +31,7 @@
from tempest.lib.services import identity
from tempest.lib.services import image
from tempest.lib.services import network
+from tempest.lib.services import object_storage
from tempest.lib.services import volume
warnings.simplefilter("once")
@@ -50,20 +51,13 @@
'image.v1': image.v1,
'image.v2': image.v2,
'network': network,
+ 'object-storage': object_storage,
'volume.v1': volume.v1,
'volume.v2': volume.v2,
'volume.v3': volume.v3
}
-def _tempest_internal_modules():
- # Set of unstable service clients available in Tempest
- # NOTE(andreaf) This list will exists only as long the remain clients
- # are migrated to tempest.lib, and it will then be deleted without
- # deprecation or advance notice
- return set(['object-storage'])
-
-
def available_modules():
"""Set of service client modules available in Tempest and plugins
@@ -101,17 +95,6 @@
plug_service_versions))
name_conflicts.append(exceptions.PluginRegistrationException(
name=plugin_name, detailed_error=detailed_error))
- # NOTE(andreaf) Once all tempest clients are stable, the following
- # if will have to be removed.
- if not plug_service_versions.isdisjoint(
- _tempest_internal_modules()):
- detailed_error = (
- 'Plugin %s is trying to register a service %s already '
- 'claimed by a Tempest one' % (plugin_name,
- _tempest_internal_modules() &
- plug_service_versions))
- name_conflicts.append(exceptions.PluginRegistrationException(
- name=plugin_name, detailed_error=detailed_error))
extra_service_versions |= plug_service_versions
if name_conflicts:
LOG.error(
@@ -276,7 +259,7 @@
@removals.removed_kwarg('client_parameters')
def __init__(self, credentials, identity_uri, region=None, scope='project',
disable_ssl_certificate_validation=True, ca_certs=None,
- trace_requests='', client_parameters=None):
+ trace_requests='', client_parameters=None, proxy_url=None):
"""Service Clients provider
Instantiate a `ServiceClients` object, from a set of credentials and an
@@ -336,6 +319,8 @@
name, as declared in `service_clients.available_modules()` except
for the version. Values are dictionaries of parameters that are
going to be passed to all clients in the service client module.
+ :param proxy_url: Proxy URL applied to the auth provider and to all
+ service clients.
"""
self._registered_services = set([])
self.credentials = credentials
@@ -360,16 +345,20 @@
self.dscv = disable_ssl_certificate_validation
self.ca_certs = ca_certs
self.trace_requests = trace_requests
+ self.proxy_url = proxy_url
# Creates an auth provider for the credentials
self.auth_provider = auth_provider_class(
self.credentials, self.identity_uri, scope=scope,
disable_ssl_certificate_validation=self.dscv,
- ca_certs=self.ca_certs, trace_requests=self.trace_requests)
+ ca_certs=self.ca_certs, trace_requests=self.trace_requests,
+ proxy_url=proxy_url)
+
# Setup some defaults for client parameters of registered services
client_parameters = client_parameters or {}
self.parameters = {}
+
# Parameters are provided for unversioned services
- all_modules = available_modules() | _tempest_internal_modules()
+ all_modules = available_modules()
unversioned_services = set(
[x.split('.')[0] for x in all_modules])
for service in unversioned_services:
@@ -420,8 +409,8 @@
clients in tempest.
:param client_names: List or set of names of service client classes.
:param kwargs: Extra optional parameters to be passed to all clients.
- ServiceClient provides defaults for region, dscv, ca_certs and
- trace_requests.
+ ServiceClient provides defaults for region, dscv, ca_certs, http
+ proxies and trace_requests.
:raise ServiceClientRegistrationException: if the provided name is
already in use or if service_version is already registered.
:raise ImportError: if module_path cannot be imported.
@@ -442,7 +431,8 @@
params = dict(region=self.region,
disable_ssl_certificate_validation=self.dscv,
ca_certs=self.ca_certs,
- trace_requests=self.trace_requests)
+ trace_requests=self.trace_requests,
+ proxy_url=self.proxy_url)
params.update(kwargs)
# Instantiate the client factory
_factory = ClientsFactory(module_path=module_path,
@@ -456,9 +446,7 @@
@property
def registered_services(self):
- # NOTE(andreaf) Once all tempest modules are stable this needs to
- # be updated to remove _tempest_internal_modules
- return self._registered_services | _tempest_internal_modules()
+ return self._registered_services
def _setup_parameters(self, parameters):
"""Setup default values for client parameters
diff --git a/tempest/lib/services/compute/images_client.py b/tempest/lib/services/compute/images_client.py
index 86bea9e..0f4eb42 100644
--- a/tempest/lib/services/compute/images_client.py
+++ b/tempest/lib/services/compute/images_client.py
@@ -17,6 +17,7 @@
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import images as schema
+from tempest.lib.api_schema.response.compute.v2_45 import images as schemav245
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
@@ -24,6 +25,10 @@
class ImagesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.44', 'schema': schema},
+ {'min': '2.45', 'max': None, 'schema': schemav245}]
+
def create_image(self, server_id, **kwargs):
"""Create an image of the original server.
@@ -36,7 +41,10 @@
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body)
- self.validate_response(schema.create_image, resp, body)
+ _schema = self.get_schema(self.schema_versions_info)
+ if body:
+ body = json.loads(body)
+ self.validate_response(_schema.create_image, resp, body)
return rest_client.ResponseBody(resp, body)
def list_images(self, detail=False, **params):
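
A hedged sketch of consuming the new microversion-aware response; the client instance, server id and snapshot name are assumed:

    def snapshot_server(images_client, server_id):
        # with a request microversion of 2.45 or later the createImage
        # action returns a body validated against the v2_45 schema and
        # containing the snapshot image id; before 2.45 the body is empty
        # and the id has to be taken from the Location header instead
        body = images_client.create_image(server_id, name='example-snap')
        return body.get('image_id')
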
diff --git a/tempest/lib/services/compute/quota_classes_client.py b/tempest/lib/services/compute/quota_classes_client.py
index 0fe9868..64e06f4 100644
--- a/tempest/lib/services/compute/quota_classes_client.py
+++ b/tempest/lib/services/compute/quota_classes_client.py
@@ -35,8 +35,9 @@
def update_quota_class_set(self, quota_class_id, **kwargs):
"""Update the quota class's limits for one or more resources.
- # NOTE: Current api-site doesn't contain this API description.
- # LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/compute/#create-or-update-quotas-for-quota-class
"""
post_body = json.dumps({'quota_class_set': kwargs})
diff --git a/tempest/lib/services/compute/quotas_client.py b/tempest/lib/services/compute/quotas_client.py
index daf4bc0..12df895 100644
--- a/tempest/lib/services/compute/quotas_client.py
+++ b/tempest/lib/services/compute/quotas_client.py
@@ -28,8 +28,8 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-compute-v2.1.html/#show-a-quota
- http://developer.openstack.org/api-ref-compute-v2.1.html/#show-the-detail-of-quota
+ https://developer.openstack.org/api-ref/compute/#show-a-quota
+ https://developer.openstack.org/api-ref/compute/#show-the-detail-of-quota
"""
params = {}
@@ -49,7 +49,10 @@
return rest_client.ResponseBody(resp, body)
def show_default_quota_set(self, tenant_id):
- """List the default quota set for a tenant."""
+ """List the default quota set for a tenant.
+
+ https://developer.openstack.org/api-ref/compute/#list-default-quotas-for-tenant
+ """
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
@@ -79,7 +82,10 @@
return rest_client.ResponseBody(resp, body)
def delete_quota_set(self, tenant_id):
- """Delete the tenant's quota set."""
+ """Delete the tenant's quota set.
+
+ https://developer.openstack.org/api-ref/compute/#revert-quotas-to-defaults
+ """
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
self.validate_response(schema.delete_quota, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 598d5a6..09bccab 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -126,7 +126,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-compute-v2.1.html#showServer
+ https://developer.openstack.org/api-ref/compute/#show-server-details
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
@@ -321,7 +321,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-replace-metadata-items
+ https://developer.openstack.org/api-ref/compute/#replace-metadata-items
"""
if no_metadata_field:
post_body = ""
@@ -338,7 +338,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-metadata-items
+ https://developer.openstack.org/api-ref/compute/#create-or-update-metadata-items
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
@@ -609,9 +609,7 @@
For a full list of available parameters, please refer to the official
API reference:
- TODO (markus_z) The api-ref for that isn't yet available, update this
- here when the docs in Nova are updated. The old API is at
- http://developer.openstack.org/api-ref/compute/#get-serial-console-os-getserialconsole-action
+ https://developer.openstack.org/api-ref/compute/#create-remote-console
"""
param = {
'remote_console': {
@@ -722,7 +720,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action
+ https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action-deprecated
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
@@ -732,7 +730,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action
+ https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action-deprecated
"""
return self.action(server_id, 'addFixedIp', **kwargs)
@@ -741,7 +739,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action
+ https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action-deprecated
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
diff --git a/tempest/lib/services/identity/v3/identity_client.py b/tempest/lib/services/identity/v3/identity_client.py
index 2512a3e..ad770bf 100644
--- a/tempest/lib/services/identity/v3/identity_client.py
+++ b/tempest/lib/services/identity/v3/identity_client.py
@@ -57,3 +57,10 @@
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+
+ def list_auth_domains(self):
+ """Get available domain scopes."""
+ resp, body = self.get("auth/domains")
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
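
A small usage sketch; `identity_client` is assumed to be an authenticated v3 IdentityClient:

    def get_enabled_domain_ids(identity_client):
        domains = identity_client.list_auth_domains()['domains']
        return [d['id'] for d in domains if d.get('enabled')]
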
diff --git a/tempest/lib/services/network/metering_label_rules_client.py b/tempest/lib/services/network/metering_label_rules_client.py
index 36cf8e3..9542e8f 100644
--- a/tempest/lib/services/network/metering_label_rules_client.py
+++ b/tempest/lib/services/network/metering_label_rules_client.py
@@ -16,6 +16,12 @@
class MeteringLabelRulesClient(base.BaseNetworkClient):
def create_metering_label_rule(self, **kwargs):
+ """Create metering label rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#create-metering-label-rule
+ """
uri = '/metering/metering-label-rules'
post_data = {'metering_label_rule': kwargs}
return self.create_resource(uri, post_data)
@@ -29,5 +35,11 @@
return self.delete_resource(uri)
def list_metering_label_rules(self, **filters):
+ """List metering label rules.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#list-metering-label-rules
+ """
uri = '/metering/metering-label-rules'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/quotas_client.py b/tempest/lib/services/network/quotas_client.py
index 752b253..e9666de 100644
--- a/tempest/lib/services/network/quotas_client.py
+++ b/tempest/lib/services/network/quotas_client.py
@@ -18,6 +18,12 @@
class QuotasClient(base.BaseNetworkClient):
def update_quotas(self, tenant_id, **kwargs):
+ """Update quota for a project.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#update-quota-for-a-project
+ """
put_body = {'quota': kwargs}
uri = '/quotas/%s' % tenant_id
return self.update_resource(uri, put_body)
@@ -35,3 +41,13 @@
def list_quotas(self, **filters):
uri = '/quotas'
return self.list_resources(uri, **filters)
+
+ def show_default_quotas(self, tenant_id):
+ """List default quotas for a project."""
+ uri = '/quotas/%s/default' % tenant_id
+ return self.show_resource(uri)
+
+ def show_quota_details(self, tenant_id):
+ """Show quota details for a project."""
+ uri = '/quotas/%s/details.json' % tenant_id
+ return self.show_resource(uri)
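
A usage sketch for the two new calls; the client instance and project id are assumed:

    def port_quota_usage(quotas_client, project_id):
        defaults = quotas_client.show_default_quotas(project_id)['quota']
        details = quotas_client.show_quota_details(project_id)['quota']
        # the details call reports limit/used/reserved per resource
        return defaults['port'], details['port']['used']
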
diff --git a/tempest/lib/services/network/service_providers_client.py b/tempest/lib/services/network/service_providers_client.py
index 0ee9bc3..01313a0 100644
--- a/tempest/lib/services/network/service_providers_client.py
+++ b/tempest/lib/services/network/service_providers_client.py
@@ -16,6 +16,11 @@
class ServiceProvidersClient(base.BaseNetworkClient):
def list_service_providers(self, **filters):
- """Lists service providers."""
+ """Lists service providers.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#list-service-providers
+ """
uri = '/service-providers'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/object_storage/__init__.py b/tempest/lib/services/object_storage/__init__.py
index e69de29..4303d09 100644
--- a/tempest/lib/services/object_storage/__init__.py
+++ b/tempest/lib/services/object_storage/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.object_storage.account_client import AccountClient
+from tempest.lib.services.object_storage.bulk_middleware_client import \
+ BulkMiddlewareClient
+from tempest.lib.services.object_storage.capabilities_client import \
+ CapabilitiesClient
+from tempest.lib.services.object_storage.container_client import \
+ ContainerClient
+from tempest.lib.services.object_storage.object_client import ObjectClient
+
+__all__ = ['AccountClient', 'BulkMiddlewareClient', 'CapabilitiesClient',
+ 'ContainerClient', 'ObjectClient']
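
With the package exporting the stable clients, callers can import them from the lib location, for example:

    from tempest.lib.services.object_storage import ContainerClient
    from tempest.lib.services.object_storage import ObjectClient
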
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 67f01a6..6b097c1 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -34,7 +34,7 @@
Account Metadata can be created, updated or deleted based on
metadata header or value. For detailed info, please refer to the
official API reference:
- http://developer.openstack.org/api-ref/object-storage/?expanded=create-update-or-delete-account-metadata-detail
+ https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-account-metadata
"""
headers = {}
if create_update_metadata:
diff --git a/tempest/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
similarity index 84%
rename from tempest/services/object_storage/container_client.py
rename to tempest/lib/services/object_storage/container_client.py
index a253599..430e0d4 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -24,30 +24,25 @@
class ContainerClient(rest_client.RestClient):
- def create_container(
- self, container_name,
- metadata=None,
- remove_metadata=None,
- metadata_prefix='X-Container-Meta-',
- remove_metadata_prefix='X-Remove-Container-Meta-'):
- """Creates a container
+ def update_container(self, container_name, **headers):
+ """Creates or Updates a container
- with optional metadata passed in as a dictionary
+ with optional metadata passed in as a dictionary.
+ For a full list of allowed headers and values, please refer to the
+ official API reference:
+ https://developer.openstack.org/api-ref/object-store/#create-container
"""
url = str(container_name)
- headers = {}
-
- if metadata is not None:
- for key in metadata:
- headers[metadata_prefix + key] = metadata[key]
- if remove_metadata is not None:
- for key in remove_metadata:
- headers[remove_metadata_prefix + key] = remove_metadata[key]
resp, body = self.put(url, body=None, headers=headers)
self.expected_success([201, 202], resp.status)
return resp, body
+ # NOTE: This alias is kept for usability because PUT can be used for
+ # both creating and updating a resource, and in the Swift container API
+ # this PUT is mainly used for creating containers.
+ create_container = update_container
+
def delete_container(self, container_name):
"""Deletes the container (if it's empty)."""
url = str(container_name)
@@ -102,7 +97,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/object-storage/?expanded=show-container-details-and-list-objects-detail
+ https://developer.openstack.org/api-ref/object-store/#show-container-details-and-list-objects
"""
url = str(container_name)
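
A hedged sketch of the new calling convention: instead of the removed metadata/metadata_prefix keywords, callers pass fully-qualified Swift headers; the header values are placeholders:

    def make_public_container(container_client, name):
        return container_client.update_container(
            name,
            **{'X-Container-Meta-purpose': 'demo',
               'X-Container-Read': '.r:*'})
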
diff --git a/tempest/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
similarity index 62%
rename from tempest/services/object_storage/object_client.py
rename to tempest/lib/services/object_storage/object_client.py
index 6d656ec..383aff6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -23,7 +23,8 @@
class ObjectClient(rest_client.RestClient):
def create_object(self, container, object_name, data,
- params=None, metadata=None, headers=None):
+ params=None, metadata=None, headers=None,
+ chunked=False):
"""Create storage object."""
if headers is None:
@@ -37,7 +38,7 @@
if params:
url += '?%s' % urlparse.urlencode(params)
- resp, body = self.put(url, data, headers)
+ resp, body = self.put(url, data, headers, chunked=chunked)
self.expected_success(201, resp.status)
return resp, body
@@ -50,28 +51,27 @@
self.expected_success([200, 204], resp.status)
return resp, body
- def update_object_metadata(self, container, object_name, metadata,
- metadata_prefix='X-Object-Meta-'):
+ def create_or_update_object_metadata(self, container, object_name,
+ headers=None):
"""Add, remove, or change X-Object-Meta metadata for storage object."""
- headers = {}
- for key in metadata:
- headers["%s%s" % (str(metadata_prefix), str(key))] = metadata[key]
-
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.post(url, None, headers=headers)
self.expected_success(202, resp.status)
return resp, body
- def list_object_metadata(self, container, object_name):
+ def list_object_metadata(self, container, object_name,
+ params=None, headers=None):
"""List all storage object X-Object-Meta- metadata."""
url = "%s/%s" % (str(container), str(object_name))
- resp, body = self.head(url)
+ if params:
+ url += '?%s' % urlparse.urlencode(params)
+ resp, body = self.head(url, headers=headers)
self.expected_success(200, resp.status)
return resp, body
- def get_object(self, container, object_name, metadata=None):
+ def get_object(self, container, object_name, metadata=None, params=None):
"""Retrieve object's data."""
headers = {}
@@ -80,45 +80,12 @@
headers[str(key)] = metadata[key]
url = "{0}/{1}".format(container, object_name)
+ if params:
+ url += '?%s' % urlparse.urlencode(params)
resp, body = self.get(url, headers=headers)
self.expected_success([200, 206], resp.status)
return resp, body
- def copy_object_in_same_container(self, container, src_object_name,
- dest_object_name, metadata=None):
- """Copy storage object's data to the new object using PUT."""
-
- url = "{0}/{1}".format(container, dest_object_name)
- headers = {}
- headers['X-Copy-From'] = "%s/%s" % (str(container),
- str(src_object_name))
- headers['content-length'] = '0'
- if metadata:
- for key in metadata:
- headers[str(key)] = metadata[key]
-
- resp, body = self.put(url, None, headers=headers)
- self.expected_success(201, resp.status)
- return resp, body
-
- def copy_object_across_containers(self, src_container, src_object_name,
- dst_container, dst_object_name,
- metadata=None):
- """Copy storage object's data to the new object using PUT."""
-
- url = "{0}/{1}".format(dst_container, dst_object_name)
- headers = {}
- headers['X-Copy-From'] = "%s/%s" % (str(src_container),
- str(src_object_name))
- headers['content-length'] = '0'
- if metadata:
- for key in metadata:
- headers[str(key)] = metadata[key]
-
- resp, body = self.put(url, None, headers=headers)
- self.expected_success(201, resp.status)
- return resp, body
-
def copy_object_2d_way(self, container, src_object_name, dest_object_name,
metadata=None):
"""Copy storage object's data to the new object using COPY."""
@@ -135,38 +102,6 @@
self.expected_success(201, resp.status)
return resp, body
- def create_object_segments(self, container, object_name, segment, data):
- """Creates object segments."""
- url = "{0}/{1}/{2}".format(container, object_name, segment)
- resp, body = self.put(url, data)
- self.expected_success(201, resp.status)
- return resp, body
-
- def put_object_with_chunk(self, container, name, contents):
- """Put an object with Transfer-Encoding header
-
- :param container: name of the container
- :type container: string
- :param name: name of the object
- :type name: string
- :param contents: object data
- :type contents: iterable
- """
- headers = {'Transfer-Encoding': 'chunked'}
- if self.token:
- headers['X-Auth-Token'] = self.token
-
- url = "%s/%s" % (container, name)
- resp, body = self.put(
- url, headers=headers,
- body=contents,
- chunked=True
- )
-
- self._error_checker(resp, body)
- self.expected_success(201, resp.status)
- return resp.status, resp.reason, resp
-
def create_object_continue(self, container, object_name,
data, metadata=None):
"""Put an object using Expect:100-continue"""
@@ -183,8 +118,7 @@
path = str(parsed.path) + "/"
path += "%s/%s" % (str(container), str(object_name))
- conn = create_connection(parsed)
-
+ conn = _create_connection(parsed)
# Send the PUT request and the headers including the "Expect" header
conn.putrequest('PUT', path)
@@ -218,7 +152,7 @@
return resp.status, resp.reason
-def create_connection(parsed_url):
+def _create_connection(parsed_url):
"""Helper function to create connection with httplib
:param parsed_url: parsed url of the remote location
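
A hedged sketch of the reworked object client calls; the client instance, container and object names are assumed:

    def upload_and_tag(object_client, container, name, chunks):
        # chunked uploads now go through create_object(chunked=True)
        # instead of the removed put_object_with_chunk helper; `chunks`
        # can be any iterable of byte strings
        resp, _ = object_client.create_object(container, name, chunks,
                                              chunked=True)
        # metadata updates now take complete headers rather than a
        # metadata dict plus prefix
        object_client.create_or_update_object_metadata(
            container, name, headers={'X-Object-Meta-color': 'blue'})
        return resp
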
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
index 067b4e8..0fac6bd 100644
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -49,9 +49,9 @@
def create_encryption_type(self, volume_type_id, **kwargs):
"""Create encryption type.
- TODO: Current api-site doesn't contain this API description.
- After fixing the api-site, we need to fix here also for putting
- the link to api-site.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
"""
url = "/types/%s/encryption" % volume_type_id
post_body = json.dumps({'encryption': kwargs})
diff --git a/tempest/lib/services/volume/v1/hosts_client.py b/tempest/lib/services/volume/v1/hosts_client.py
index 56ba12c..9b19b84 100644
--- a/tempest/lib/services/volume/v1/hosts_client.py
+++ b/tempest/lib/services/volume/v1/hosts_client.py
@@ -23,8 +23,12 @@
"""Client class to send CRUD Volume Host API V1 requests"""
def list_hosts(self, **params):
- """Lists all hosts."""
+ """Lists all hosts.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-all-hosts
+ """
url = 'os-hosts'
if params:
url += '?%s' % urllib.urlencode(params)
diff --git a/tempest/lib/services/volume/v1/qos_client.py b/tempest/lib/services/volume/v1/qos_client.py
index e247b7b..593bddd 100644
--- a/tempest/lib/services/volume/v1/qos_client.py
+++ b/tempest/lib/services/volume/v1/qos_client.py
@@ -92,7 +92,9 @@
:param keys: keys to delete from the QoS specification.
- TODO(jordanP): Add a link once LP #1524877 is fixed.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
diff --git a/tempest/lib/services/volume/v1/quotas_client.py b/tempest/lib/services/volume/v1/quotas_client.py
index 678fd82..84f34f2 100644
--- a/tempest/lib/services/volume/v1/quotas_client.py
+++ b/tempest/lib/services/volume/v1/quotas_client.py
@@ -47,7 +47,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-blockstorage-v1.html#updateQuota
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-quotas
"""
put_body = jsonutils.dumps({'quota_set': kwargs})
resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
diff --git a/tempest/lib/services/volume/v1/snapshots_client.py b/tempest/lib/services/volume/v1/snapshots_client.py
index 3433e68..51f7b9b 100644
--- a/tempest/lib/services/volume/v1/snapshots_client.py
+++ b/tempest/lib/services/volume/v1/snapshots_client.py
@@ -27,7 +27,8 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-snapshots-with-details-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
"""
url = 'snapshots'
if detail:
@@ -45,7 +46,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-snapshot-details-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
"""
url = "snapshots/%s" % snapshot_id
resp, body = self.get(url)
@@ -58,7 +59,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-snapshot
"""
post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
@@ -71,7 +72,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#delete-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#delete-snapshot
"""
resp, body = self.delete("snapshots/%s" % snapshot_id)
self.expected_success(202, resp.status)
@@ -123,7 +124,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-snapshot-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot
"""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
@@ -136,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-snapshot-metadata-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
"""
url = "snapshots/%s/metadata" % snapshot_id
resp, body = self.get(url)
@@ -149,7 +150,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-snapshot-metadata-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
"""
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % snapshot_id
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
index 4ae9935..58a80b7 100644
--- a/tempest/lib/services/volume/v1/types_client.py
+++ b/tempest/lib/services/volume/v1/types_client.py
@@ -40,7 +40,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-volume-types-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
"""
url = 'types'
if params:
@@ -56,7 +56,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#show-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
"""
url = "types/%s" % volume_type_id
resp, body = self.get(url)
@@ -69,7 +69,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
"""
post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
@@ -82,7 +82,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#delete-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#delete-volume-type
"""
resp, body = self.delete("types/%s" % volume_type_id)
self.expected_success(202, resp.status)
@@ -137,7 +137,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-volume-type
"""
put_body = json.dumps({'volume_type': kwargs})
resp, body = self.put('types/%s' % volume_type_id, put_body)
@@ -155,7 +155,7 @@
updated value.
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-extra-specs-for-a-volume-type-v1
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
"""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
put_body = json.dumps(extra_specs)
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 7a25697..0e6ea9f 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -38,6 +38,11 @@
"""List all the volumes created.
Params can be a string (must be urlencoded) or a dictionary.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
"""
url = 'volumes'
if detail:
@@ -63,7 +68,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume
"""
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
@@ -76,7 +81,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#update-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#update-volume
"""
put_body = json.dumps({'volume': kwargs})
resp, body = self.put('volumes/%s' % volume_id, put_body)
@@ -104,7 +109,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#attach-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
"""
post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -161,7 +166,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#extend-volume
+ https://developer.openstack.org/api-ref/block-storage/v2/#extend-volume-size
"""
post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
@@ -174,7 +179,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#reset-volume-status
+ https://developer.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
"""
post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
@@ -186,7 +191,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#create-volume-transfer
+ https://developer.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
"""
post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
@@ -207,7 +212,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#list-volume-transfers
+ https://developer.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
"""
url = 'os-volume-transfer'
if params:
@@ -228,7 +233,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref/block-storage/v1/#accept-volume-transfer
+ https://developer.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
"""
url = 'os-volume-transfer/%s/accept' % transfer_id
post_body = json.dumps({'accept': kwargs})
diff --git a/tempest/lib/services/volume/v2/encryption_types_client.py b/tempest/lib/services/volume/v2/encryption_types_client.py
index 20f3356..b99d1fe 100644
--- a/tempest/lib/services/volume/v2/encryption_types_client.py
+++ b/tempest/lib/services/volume/v2/encryption_types_client.py
@@ -47,6 +47,14 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def show_encryption_specs_item(self, volume_type_id, key):
+ """Get the encryption specs item for the specified volume type."""
+ url = "/types/%s/encryption/%s" % (volume_type_id, key)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def create_encryption_type(self, volume_type_id, **kwargs):
"""Create encryption type.
diff --git a/tempest/lib/services/volume/v2/quota_classes_client.py b/tempest/lib/services/volume/v2/quota_classes_client.py
index d40d2d9..733b1ac 100644
--- a/tempest/lib/services/volume/v2/quota_classes_client.py
+++ b/tempest/lib/services/volume/v2/quota_classes_client.py
@@ -26,8 +26,9 @@
def show_quota_class_set(self, quota_class_id):
"""List quotas for a quota class.
- TODO: Current api-site doesn't contain this API description.
- LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/index.html#show-quota-classes
"""
url = 'os-quota-class-sets/%s' % quota_class_id
resp, body = self.get(url)
@@ -38,8 +39,9 @@
def update_quota_class_set(self, quota_class_id, **kwargs):
"""Update quotas for a quota class.
- TODO: Current api-site doesn't contain this API description.
- LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/index.html#update-quota-classes
"""
url = 'os-quota-class-sets/%s' % quota_class_id
put_body = json.dumps({'quota_class_set': kwargs})
diff --git a/tempest/lib/services/volume/v2/volumes_client.py b/tempest/lib/services/volume/v2/volumes_client.py
index d13e449..da3f2b5 100644
--- a/tempest/lib/services/volume/v2/volumes_client.py
+++ b/tempest/lib/services/volume/v2/volumes_client.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from debtcollector import moves
-from debtcollector import removals
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
@@ -22,43 +20,12 @@
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
-from tempest.lib.services.volume.v2 import transfers_client
class VolumesClient(base_client.BaseClient):
"""Client class to send CRUD Volume V2 API requests"""
api_version = "v2"
- create_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.create_volume_transfer,
- 'VolumesClient.create_volume_transfer', __name__,
- message='Use create_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- show_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.show_volume_transfer,
- 'VolumesClient.show_volume_transfer', __name__,
- message='Use show_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- list_volume_transfers = moves.moved_function(
- transfers_client.TransfersClient.list_volume_transfers,
- 'VolumesClient.list_volume_transfers', __name__,
- message='Use list_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- delete_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.delete_volume_transfer,
- 'VolumesClient.delete_volume_transfer', __name__,
- message='Use delete_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
- accept_volume_transfer = moves.moved_function(
- transfers_client.TransfersClient.accept_volume_transfer,
- 'VolumesClient.accept_volume_transfer', __name__,
- message='Use accept_volume_transfer from new location.',
- version='Pike', removal_version='Queens')
-
def _prepare_params(self, params):
"""Prepares params for use in get or _ext_get methods.
@@ -372,34 +339,6 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
- @removals.remove(message="use list_pools from tempest.lib.services."
- "volume.v2.scheduler_stats_client")
- def show_pools(self, detail=False):
- # List all the volumes pools (hosts)
- url = 'scheduler-stats/get_pools'
- if detail:
- url += '?detail=True'
-
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- @removals.remove(message="use show_backend_capabilities from tempest.lib."
- "services.volume.v2.capabilities_client")
- def show_backend_capabilities(self, host):
- """Shows capabilities for a storage back end.
-
- For a full list of available parameters, please refer to the official
- API reference:
- http://developer.openstack.org/api-ref/block-storage/v2/#show-back-end-capabilities
- """
- url = 'capabilities/%s' % host
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
def unmanage_volume(self, volume_id):
"""Unmanage volume.
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 97bac48..ecbcba1 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -75,3 +75,67 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def update_group_type(self, group_type_id, **kwargs):
+ """Updates a group type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-group-type
+ """
+ post_body = json.dumps({'group_type': kwargs})
+ resp, body = self.put('group_types/%s' % group_type_id, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def create_or_update_group_type_specs(self, group_type_id, group_specs):
+ """Creates new group specs or updates existing group specs.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#create-or-update-group-specs-for-a-group-type
+ """
+ url = "group_types/%s/group_specs" % group_type_id
+ post_body = json.dumps({'group_specs': group_specs})
+ resp, body = self.post(url, post_body)
+ body = json.loads(body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_group_type_specs(self, group_type_id):
+ """Lists all group specs for a given group type."""
+ url = 'group_types/%s/group_specs' % group_type_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_group_type_specs_item(self, group_type_id, spec_id):
+ """Shows specified item of group specs for a given group type."""
+ url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_group_type_specs_item(self, group_type_id, spec_id, spec):
+ """Updates specified item of group specs for a given group type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-one-specific-group-spec-for-a-group-type
+ """
+ url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
+ put_body = json.dumps(spec)
+ resp, body = self.put(url, put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_group_type_specs_item(self, group_type_id, spec_id):
+ """Deletes specified item of group specs for a given group type."""
+ resp, body = self.delete("group_types/%s/group_specs/%s" % (
+ group_type_id, spec_id))
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
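
A usage sketch walking through the new group-spec calls; the client instance, group type id and the spec key/values are placeholders:

    def exercise_group_specs(group_types_client, group_type_id):
        key = 'consistent_group_snapshot_enabled'
        group_types_client.create_or_update_group_type_specs(
            group_type_id, {key: '<is> True'})
        group_types_client.show_group_type_specs_item(group_type_id, key)
        group_types_client.update_group_type_specs_item(
            group_type_id, key, {key: '<is> False'})
        group_types_client.delete_group_type_specs_item(group_type_id, key)
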
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index c1dcccc..efcd139 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -14,11 +14,12 @@
Any scenario test should have a real-life use case. An example would be:
- - "As operator I want to start with a blank environment":
- 1. upload a glance image
- 2. deploy a vm from it
- 3. ssh to the guest
- 4. create a snapshot of the vm
+- "As operator I want to start with a blank environment":
+
+ 1. upload a glance image
+ 2. deploy a vm from it
+ 3. ssh to the guest
+ 4. create a snapshot of the vm
Why are these tests in Tempest?
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 91b92d9..06aa531 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -89,16 +89,14 @@
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
- def _create_port(self, network_id, client=None, namestart='port-quotatest',
- **kwargs):
+ def create_port(self, network_id, client=None, **kwargs):
if not client:
client = self.ports_client
- name = data_utils.rand_name(namestart)
+ name = data_utils.rand_name(self.__class__.__name__)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
- self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
@@ -147,8 +145,7 @@
if vnic_type:
ports = []
- create_port_body = {'binding:vnic_type': vnic_type,
- 'namestart': 'port-smoke'}
+ create_port_body = {'binding:vnic_type': vnic_type}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
@@ -185,9 +182,9 @@
for net in networks:
net_id = net.get('uuid', net.get('id'))
if 'port' not in net:
- port = self._create_port(network_id=net_id,
- client=clients.ports_client,
- **create_port_body)
+ port = self.create_port(network_id=net_id,
+ client=clients.ports_client,
+ **create_port_body)
ports.append({'port': port['id']})
else:
ports.append({'port': net['port']})
@@ -504,24 +501,6 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
- def rebuild_server(self, server_id, image=None,
- preserve_ephemeral=False, wait=True,
- rebuild_kwargs=None):
- if image is None:
- image = CONF.compute.image_ref
-
- rebuild_kwargs = rebuild_kwargs or {}
-
- LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
- server_id, image, preserve_ephemeral)
- self.servers_client.rebuild_server(
- server_id=server_id, image_ref=image,
- preserve_ephemeral=preserve_ephemeral,
- **rebuild_kwargs)
- if wait:
- waiters.wait_for_server_status(self.servers_client,
- server_id, 'ACTIVE')
-
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
@@ -670,9 +649,7 @@
addresses = server['addresses'][
CONF.validation.network_for_ssh]
else:
- creds_provider = self._get_credentials_provider()
- net_creds = creds_provider.get_primary_creds()
- network = getattr(net_creds, 'network', None)
+ network = self.get_tenant_network()
addresses = (server['addresses'][network['name']]
if network else [])
for address in addresses:
@@ -727,17 +704,14 @@
network['id'])
return network
- def _create_subnet(self, network, subnets_client=None,
- routers_client=None, namestart='subnet-smoke',
- **kwargs):
+ def create_subnet(self, network, subnets_client=None,
+ namestart='subnet-smoke', **kwargs):
"""Create a subnet for the given network
within the cidr block configured for tenant networks.
"""
if not subnets_client:
subnets_client = self.subnets_client
- if not routers_client:
- routers_client = self.routers_client
def cidr_in_use(cidr, tenant_id):
"""Check cidr existence
@@ -880,11 +854,11 @@
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
+ def check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True,
+ servers_for_debug=None):
if not CONF.network.project_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
@@ -904,16 +878,13 @@
self._log_net_info(e)
raise
- def _check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
+ def check_remote_connectivity(self, source, dest, should_succeed=True,
+ nic=None):
"""assert ping server via source ssh connection
- Note: This is an internal method. Use check_remote_connectivity
- instead.
-
:param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
+ :param dest: an IP to ping against
+ :param should_succeed: boolean: should ping succeed or not
:param nic: specific network interface to ping from
"""
def ping_remote():
@@ -925,28 +896,19 @@
return not should_succeed
return should_succeed
- return test_utils.call_until_true(ping_remote,
- CONF.validation.ping_timeout,
- 1)
+ result = test_utils.call_until_true(ping_remote,
+ CONF.validation.ping_timeout, 1)
+ if result:
+ return
- def check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """assert ping server via source ssh connection
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :param nic: specific network interface to ping from
- """
- result = self._check_remote_connectivity(source, dest, should_succeed,
- nic)
source_host = source.ssh_client.host
if should_succeed:
msg = "Timed out waiting for %s to become reachable from %s" \
% (dest, source_host)
else:
msg = "%s is reachable from %s" % (dest, source_host)
- self.assertTrue(result, msg)
+ self._log_console_output()
+ self.fail(msg)
def _create_security_group(self, security_group_rules_client=None,
tenant_id=None,
@@ -1124,31 +1086,18 @@
body = client.show_router(router_id)
return body['router']
elif network_id:
- router = self._create_router(client, tenant_id)
- kwargs = {'external_gateway_info': dict(network_id=network_id)}
- router = client.update_router(router['id'], **kwargs)['router']
+ router = client.create_router(
+ name=data_utils.rand_name(self.__class__.__name__ + '-router'),
+ admin_state_up=True,
+ tenant_id=tenant_id,
+ external_gateway_info=dict(network_id=network_id))['router']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_router, router['id'])
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
- def _create_router(self, client=None, tenant_id=None,
- namestart='router-smoke'):
- if not client:
- client = self.routers_client
- if not tenant_id:
- tenant_id = client.tenant_id
- name = data_utils.rand_name(namestart)
- result = client.create_router(name=name,
- admin_state_up=True,
- tenant_id=tenant_id)
- router = result['router']
- self.assertEqual(router['name'], name)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_router,
- router['id'])
- return router
-
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
@@ -1183,12 +1132,11 @@
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
- subnets_client=subnets_client,
- routers_client=routers_client)
+ subnets_client=subnets_client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
- subnet = self._create_subnet(**subnet_kwargs)
+ subnet = self.create_subnet(**subnet_kwargs)
if not routers_client:
routers_client = self.routers_client
router_id = router['id']
@@ -1226,7 +1174,7 @@
LOG.debug("Creating an encryption type for volume type: %s", type_id)
client.create_encryption_type(
type_id, provider=provider, key_size=key_size, cipher=cipher,
- control_location=control_location)['encryption']
+ control_location=control_location)
def create_encrypted_volume(self, encryption_provider, volume_type,
key_size=256, cipher='aes-xts-plain64',
@@ -1278,7 +1226,7 @@
def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
- self.container_client.create_container(name)
+ self.container_client.update_container(name)
# look for the container to assure it is created
self.list_and_check_container_objects(name)
LOG.debug('Container %s created', name)
@@ -1324,14 +1272,6 @@
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
- def change_container_acl(self, container_name, acl):
- metadata_param = {'metadata_prefix': 'x-container-',
- 'metadata': {'read': acl}}
- self.container_client.create_update_or_delete_container_metadata(
- container_name, create_update_metadata=metadata_param)
- resp, _ = self.container_client.list_container_metadata(container_name)
- self.assertEqual(resp['x-container-read'], acl)
-
def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
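
A hedged sketch of how the renamed scenario helpers are consumed from a subclass; the class name, method name, network and ssh arguments are illustrative:

    from tempest.scenario import manager

    class ExampleScenarioTest(manager.NetworkScenarioTest):

        def _port_and_connectivity(self, network, ssh_source, dest_ip):
            # create_port replaces the old private _create_port helper and
            # names the port after the test class
            port = self.create_port(network_id=network['id'])
            # check_remote_connectivity now fails the test itself on
            # timeout (logging console output) instead of returning a
            # boolean for the caller to assert on
            self.check_remote_connectivity(ssh_source, dest_ip,
                                           should_succeed=True)
            return port
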
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 29f1743..2b35e45 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.common import waiters
@@ -101,10 +99,6 @@
return address
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
- @testtools.skipUnless(CONF.network.public_network_id,
- 'The public_network_id option must be specified.')
- @testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
- 'Floating ips are not available')
@utils.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
image = self.glance_image_create()
@@ -126,22 +120,29 @@
self.addCleanup(self.nova_volume_detach, server, volume)
self.cinder_show(volume)
- floating_ip = self.create_floating_ip(server)
- # fetch the server again to make sure the addresses were refreshed
- # after associating the floating IP
+ floating_ip = None
server = self.servers_client.show_server(server['id'])['server']
- address = self._get_floating_ip_in_server_addresses(
- floating_ip, server)
- self.assertIsNotNone(
- address,
- "Failed to find floating IP '%s' in server addresses: %s" %
- (floating_ip['ip'], server['addresses']))
+ if (CONF.network_feature_enabled.floating_ips and
+ CONF.network.floating_network_name):
+ floating_ip = self.create_floating_ip(server)
+ # fetch the server again to make sure the addresses were refreshed
+ # after associating the floating IP
+ server = self.servers_client.show_server(server['id'])['server']
+ address = self._get_floating_ip_in_server_addresses(
+ floating_ip, server)
+ self.assertIsNotNone(
+ address,
+ "Failed to find floating IP '%s' in server addresses: %s" %
+ (floating_ip['ip'], server['addresses']))
+ ssh_ip = floating_ip['ip']
+ else:
+ ssh_ip = self.get_server_ip(server)
self.create_and_add_security_group_to_server(server)
# check that we can SSH to the server before reboot
self.linux_client = self.get_remote_client(
- floating_ip['ip'], private_key=keypair['private_key'],
+ ssh_ip, private_key=keypair['private_key'],
server=server)
self.nova_reboot(server)
@@ -149,25 +150,27 @@
# check that we can SSH to the server after reboot
# (both connections are part of the scenario)
self.linux_client = self.get_remote_client(
- floating_ip['ip'], private_key=keypair['private_key'],
+ ssh_ip, private_key=keypair['private_key'],
server=server)
self.check_disks()
- # delete the floating IP, this should refresh the server addresses
- self.compute_floating_ips_client.delete_floating_ip(floating_ip['id'])
+ if floating_ip:
+ # delete the floating IP, this should refresh the server addresses
+ self.compute_floating_ips_client.delete_floating_ip(
+ floating_ip['id'])
- def is_floating_ip_detached_from_server():
- server_info = self.servers_client.show_server(
- server['id'])['server']
- address = self._get_floating_ip_in_server_addresses(
- floating_ip, server_info)
- return (not address)
+ def is_floating_ip_detached_from_server():
+ server_info = self.servers_client.show_server(
+ server['id'])['server']
+ address = self._get_floating_ip_in_server_addresses(
+ floating_ip, server_info)
+ return (not address)
- if not test_utils.call_until_true(
- is_floating_ip_detached_from_server,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- msg = ("Floating IP '%s' should not be in server addresses: %s" %
- (floating_ip['ip'], server['addresses']))
- raise exceptions.TimeoutException(msg)
+ if not test_utils.call_until_true(
+ is_floating_ip_detached_from_server,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ msg = ("Floating IP '%s' should not be in server addresses: %s"
+ % (floating_ip['ip'], server['addresses']))
+ raise exceptions.TimeoutException(msg)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 340c3c9..e4ab11c 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -83,7 +83,7 @@
should_connect=True):
username = CONF.validation.image_ssh_user
private_key = keypair['private_key']
- self._check_tenant_network_connectivity(
+ self.check_tenant_network_connectivity(
server, username, private_key,
should_connect=should_connect,
servers_for_debug=[server])
@@ -195,6 +195,8 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server['id'])
+ server = self.servers_client.show_server(server['id'])['server']
+ self.assertEqual(resize_flavor, server['flavor']['id'])
self._wait_server_status_and_check_network_connectivity(
server, keypair, floating_ip)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 0c3bf23..ff8837f 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -113,11 +113,16 @@
port_id = None
if boot_with_port:
# create a port on the network and boot with that
- port_id = self._create_port(self.network['id'])['id']
+ port_id = self.create_port(self.network['id'])['id']
self.ports.append({'port': port_id})
server = self._create_server(self.network, port_id)
- self._check_tenant_network_connectivity()
+ ssh_login = CONF.validation.image_ssh_user
+ for server in self.servers:
+ # call the common method in the parent class
+ self.check_tenant_network_connectivity(
+ server, ssh_login, self._get_server_key(server),
+ servers_for_debug=self.servers)
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
@@ -170,15 +175,6 @@
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
- def _check_tenant_network_connectivity(self):
- ssh_login = CONF.validation.image_ssh_user
- for server in self.servers:
- # call the common method in the parent class
- super(TestNetworkBasicOps, self).\
- _check_tenant_network_connectivity(
- server, ssh_login, self._get_server_key(server),
- servers_for_debug=self.servers)
-
def check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True, mtu=None):
@@ -231,10 +227,10 @@
def _create_new_network(self, create_gateway=False):
self.new_net = self._create_network()
if create_gateway:
- self.new_subnet = self._create_subnet(
+ self.new_subnet = self.create_subnet(
network=self.new_net)
else:
- self.new_subnet = self._create_subnet(
+ self.new_subnet = self.create_subnet(
network=self.new_net,
gateway_ip=None)
@@ -425,6 +421,10 @@
def test_mtu_sized_frames(self):
"""Validate that network MTU sized frames fit through."""
self._setup_network_and_servers()
+ # first check that connectivity works in general for the instance
+ self.check_public_network_connectivity(should_connect=True)
+ # now that we checked general connectivity, test that full size frames
+ # can also pass between nodes
self.check_public_network_connectivity(
should_connect=True, mtu=self.network['mtu'])
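
The new comment in test_mtu_sized_frames separates basic reachability from full-MTU reachability. As a rough, hedged illustration of what an MTU-sized check boils down to (ssh_client and remote_ip are placeholders; tempest's own helper hides these details):

    # With IPv4, a 20-byte IP header plus an 8-byte ICMP header means a
    # payload of mtu - 28 bytes exactly fills one frame; -M do forbids
    # fragmentation, so an undersized path MTU makes the ping fail.
    payload = self.network['mtu'] - 28
    ssh_client.exec_command('ping -c 3 -M do -s %d %s' % (payload, remote_ip))
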
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index b687aa0..9f4e62b 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -12,8 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import functools
-
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
@@ -78,9 +76,9 @@
if dualnet:
network_v6 = self._create_network()
- sub4 = self._create_subnet(network=network,
- namestart='sub4',
- ip_version=4)
+ sub4 = self.create_subnet(network=network,
+ namestart='sub4',
+ ip_version=4)
router = self._get_router()
self.routers_client.add_router_interface(router['id'],
@@ -93,11 +91,11 @@
self.subnets_v6 = []
for _ in range(n_subnets6):
net6 = network_v6 if dualnet else network
- sub6 = self._create_subnet(network=net6,
- namestart='sub6',
- ip_version=6,
- ipv6_ra_mode=address6_mode,
- ipv6_address_mode=address6_mode)
+ sub6 = self.create_subnet(network=net6,
+ namestart='sub6',
+ ip_version=6,
+ ipv6_ra_mode=address6_mode,
+ ipv6_address_mode=address6_mode)
self.routers_client.add_router_interface(router['id'],
subnet_id=sub6['id'])
@@ -132,7 +130,7 @@
ssh = self.get_remote_client(
ip_address=fip['floating_ip_address'],
username=username, server=srv)
- return ssh, ips, srv["id"]
+ return ssh, ips, srv
def turn_nic6_on(self, ssh, sid, network_id):
"""Turns the IPv6 vNIC on
@@ -163,8 +161,8 @@
n_subnets6=n_subnets6,
dualnet=dualnet)
- sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
- sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)
+ sshv4_1, ips_from_api_1, srv1 = self.prepare_server(networks=net_list)
+ sshv4_2, ips_from_api_2, srv2 = self.prepare_server(networks=net_list)
def guest_has_address(ssh, addr):
return addr in ssh.exec_command("ip address")
@@ -172,8 +170,8 @@
# Turn on 2nd NIC for Cirros when dualnet
if dualnet:
_, network_v6 = net_list
- self.turn_nic6_on(sshv4_1, sid1, network_v6['id'])
- self.turn_nic6_on(sshv4_2, sid2, network_v6['id'])
+ self.turn_nic6_on(sshv4_1, srv1['id'], network_v6['id'])
+ self.turn_nic6_on(sshv4_2, srv2['id'], network_v6['id'])
# get addresses assigned to vNIC as reported by 'ip address' utility
ips_from_ip_1 = sshv4_1.exec_command("ip address")
@@ -183,17 +181,19 @@
for i in range(n_subnets6):
# v6 should be configured since the image supports it
# It can take time for the automatic IPv6 address to be assigned
- srv1_v6_addr_assigned = functools.partial(
- guest_has_address, sshv4_1, ips_from_api_1['6'][i])
-
- srv2_v6_addr_assigned = functools.partial(
- guest_has_address, sshv4_2, ips_from_api_2['6'][i])
-
- self.assertTrue(test_utils.call_until_true(srv1_v6_addr_assigned,
- CONF.validation.ping_timeout, 1))
-
- self.assertTrue(test_utils.call_until_true(srv2_v6_addr_assigned,
- CONF.validation.ping_timeout, 1))
+ for srv, ssh, ips in (
+ (srv1, sshv4_1, ips_from_api_1),
+ (srv2, sshv4_2, ips_from_api_2)):
+ ip = ips['6'][i]
+ result = test_utils.call_until_true(
+ guest_has_address,
+ CONF.validation.ping_timeout, 1, ssh, ip)
+ if not result:
+ self._log_console_output(servers=[srv])
+ self.fail(
+ 'Address %s not configured for instance %s, '
+ 'ip address output is\n%s' %
+ (ip, srv['id'], ssh.exec_command("ip address")))
self.check_remote_connectivity(sshv4_1, ips_from_api_2['4'])
self.check_remote_connectivity(sshv4_2, ips_from_api_1['4'])
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index da0b1e8..cbe321e 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -58,12 +58,18 @@
5. Delete the object and container
"""
container_name = self.create_container()
- obj_name, _ = self.upload_object_to_container(container_name)
+ obj_name, obj_data = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
resp, _ = self.object_client.raw_request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
-
- self.change_container_acl(container_name, '.r:*')
- resp, _ = self.object_client.raw_request(obj_url, 'GET')
+ metadata_param = {'X-Container-Read': '.r:*'}
+ self.container_client.create_update_or_delete_container_metadata(
+ container_name, create_update_metadata=metadata_param,
+ create_update_metadata_prefix='')
+ resp, _ = self.container_client.list_container_metadata(container_name)
+ self.assertEqual(metadata_param['X-Container-Read'],
+ resp['x-container-read'])
+ resp, data = self.object_client.raw_request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
+ self.assertEqual(obj_data, data)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index d4f29ad..89b9fdd 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -42,28 +42,6 @@
super(TestServerAdvancedOps, cls).setup_credentials()
@decorators.attr(type='slow')
- @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize is not available.')
- @utils.services('compute', 'volume')
- def test_resize_volume_backed_server_confirm(self):
- # We create an instance for use in this test
- instance = self.create_server(volume_backed=True)
- instance_id = instance['id']
- resize_flavor = CONF.compute.flavor_ref_alt
- LOG.debug("Resizing instance %s from flavor %s to flavor %s",
- instance['id'], instance['flavor']['id'], resize_flavor)
- self.servers_client.resize_server(instance_id, resize_flavor)
- waiters.wait_for_server_status(self.servers_client, instance_id,
- 'VERIFY_RESIZE')
-
- LOG.debug("Confirming resize of instance %s", instance_id)
- self.servers_client.confirm_resize_server(instance_id)
-
- waiters.wait_for_server_status(self.servers_client, instance_id,
- 'ACTIVE')
-
- @decorators.attr(type='slow')
@decorators.idempotent_id('949da7d5-72c8-4808-8802-e3d70df98e2c')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d5c378e..1be8625 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -43,12 +43,6 @@
* Terminate the instance
"""
- @classmethod
- def skip_checks(cls):
- super(TestServerBasicOps, cls).skip_checks()
- if not CONF.network_feature_enabled.floating_ips:
- raise cls.skipException("Floating ips are not available")
-
def setUp(self):
super(TestServerBasicOps, self).setUp()
self.run_ssh = CONF.validation.run_validation
@@ -56,11 +50,17 @@
def verify_ssh(self, keypair):
if self.run_ssh:
- # Obtain a floating IP
- self.fip = self.create_floating_ip(self.instance)['ip']
+ # Obtain a floating IP if floating_ips is enabled
+ if (CONF.network_feature_enabled.floating_ips and
+ CONF.network.floating_network_name):
+ self.ip = self.create_floating_ip(self.instance)['ip']
+ else:
+ server = self.servers_client.show_server(
+ self.instance['id'])['server']
+ self.ip = self.get_server_ip(server)
# Check ssh
self.ssh_client = self.get_remote_client(
- ip_address=self.fip,
+ ip_address=self.ip,
username=self.ssh_user,
private_key=keypair['private_key'],
server=self.instance)
@@ -75,8 +75,8 @@
result = self.ssh_client.exec_command(cmd)
if result:
msg = ('Failed while verifying metadata on server. Result '
- 'of command "%s" is NOT "%s".' % (cmd, self.fip))
- self.assertEqual(self.fip, result, msg)
+ 'of command "%s" is NOT "%s".' % (cmd, self.ip))
+ self.assertEqual(self.ip, result, msg)
return 'Verification is successful!'
if not test_utils.call_until_true(exec_cmd_and_verify_output,
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 64ea8f6..beb039c 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -208,7 +208,19 @@
# boot instance from EBS image
instance = self.create_server(image_id=image['id'])
- # just ensure that instance booted
+
+ # Verify the server was created from the image
+ created_volume = instance['os-extended-volumes:volumes_attached']
+ self.assertNotEmpty(created_volume, "No volume attachment found.")
+ created_volume_info = self.volumes_client.show_volume(
+ created_volume[0]['id'])['volume']
+ self.assertEqual(instance['id'],
+ created_volume_info['attachments'][0]['server_id'])
+ self.assertEqual(created_volume[0]['id'],
+ created_volume_info['attachments'][0]['volume_id'])
+ self.assertEqual(
+ volume_origin['volume_image_metadata']['image_id'],
+ created_volume_info['volume_image_metadata']['image_id'])
# delete instance
self._delete_server(instance)
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index cd10bbd..ff7996a 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -38,6 +38,11 @@
credentials = ['primary', 'admin']
@classmethod
+ def setup_clients(cls):
+ super(TestVolumeMigrateRetypeAttached, cls).setup_clients()
+ cls.admin_volumes_client = cls.os_admin.volumes_v2_client
+
+ @classmethod
def skip_checks(cls):
super(TestVolumeMigrateRetypeAttached, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
@@ -76,8 +81,10 @@
return source_body['name'], dest_body['name']
def _volume_retype_with_migration(self, volume_id, new_volume_type):
+ # NOTE: The 'on-demand' migration policy requires an admin credential,
+ # so admin_volumes_client is used here.
migration_policy = 'on-demand'
- self.volumes_client.retype_volume(
+ self.admin_volumes_client.retype_volume(
volume_id, new_type=new_volume_type,
migration_policy=migration_policy)
waiters.wait_for_volume_retype(self.volumes_client,
diff --git a/tempest/services/object_storage/__init__.py b/tempest/services/object_storage/__init__.py
deleted file mode 100644
index 771ed8f..0000000
--- a/tempest/services/object_storage/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.lib.services.object_storage.account_client import AccountClient
-from tempest.lib.services.object_storage.bulk_middleware_client import \
- BulkMiddlewareClient
-from tempest.lib.services.object_storage.capabilities_client import \
- CapabilitiesClient
-from tempest.services.object_storage.container_client import ContainerClient
-from tempest.services.object_storage.object_client import ObjectClient
-
-__all__ = ['AccountClient', 'BulkMiddlewareClient', 'CapabilitiesClient',
- 'ContainerClient', 'ObjectClient']
diff --git a/tempest/test.py b/tempest/test.py
index 9da85d5..27e0165 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -836,7 +836,7 @@
manager = cls.get_client_manager()
# Make sure cred_provider exists and get a network client
- networks_client = manager.compute_networks_client
+ networks_client = manager.networks_client
cred_provider = cls._get_credentials_provider()
# In case of nova network, isolated tenants are not able to list the
# network configured in fixed_network_name, even if they can use it
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 1206e3f..9c18052 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -76,7 +76,7 @@
conf.register_opt(my_config.service_option,
group='service_available')
conf.register_group(my_config.my_service_group)
- conf.register_opts(my_config.MyService +
+ conf.register_opts(my_config.MyServiceGroup,
my_config.my_service_group)
conf.register_group(my_config.my_service_feature_group)
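
Since the hunk above corrects the register_opts example in the plugin docstring, here is a self-contained sketch of that registration pattern with oslo.config (the MyServiceGroup and my_service_group names follow the docstring example and are assumptions):

    from oslo_config import cfg

    my_service_group = cfg.OptGroup(name='my-service',
                                    title='My service options')
    MyServiceGroup = [
        cfg.BoolOpt('feature_x', default=False,
                    help='Whether feature X tests should run'),
    ]

    def register_opts(conf):
        # register the group first, then the options that belong to it
        conf.register_group(my_service_group)
        conf.register_opts(MyServiceGroup, group=my_service_group)
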
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index f907bd0..fd9af08 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -44,6 +44,7 @@
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.opts = FakeOpts(version=identity_version)
+ self.patch('oslo_log.log.setup', autospec=True)
def mock_resource_creation(self):
fake_resource = dict(id='id', name='name')
@@ -152,17 +153,14 @@
def test_generate_resources_no_admin(self):
cfg.CONF.set_default('swift', False, group='service_available')
- cfg.CONF.set_default('heat', False, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=False)
resource_types = [k for k, _ in resources]
- # No admin, no heat, no swift, expect two credentials only
+ # No admin, no swift, expect two credentials only
self.assertEqual(2, len(resources))
# Ensure create_user was invoked twice (two distinct users)
self.assertEqual(2, self.user_create_fixture.mock.call_count)
@@ -179,17 +177,14 @@
def test_generate_resources_admin(self):
cfg.CONF.set_default('swift', False, group='service_available')
- cfg.CONF.set_default('heat', False, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
- # Admin, no heat, no swift, expect three credentials only
+ # Admin, no swift, expect three credentials only
self.assertEqual(3, len(resources))
# Ensure create_user was invoked 3 times (3 distinct users)
self.assertEqual(3, self.user_create_fixture.mock.call_count)
@@ -204,28 +199,24 @@
self.assertIsNotNone(resource[1].router)
self.assertIsNotNone(resource[1].subnet)
- def test_generate_resources_swift_heat_admin(self):
+ def test_generate_resources_swift_admin(self):
cfg.CONF.set_default('swift', True, group='service_available')
- cfg.CONF.set_default('heat', True, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
# all options on, expect six credentials
self.assertEqual(6, len(resources))
# Ensure create_user was invoked 6 times (6 distinct users)
- self.assertEqual(6, self.user_create_fixture.mock.call_count)
+ self.assertEqual(5, self.user_create_fixture.mock.call_count)
self.assertIn('primary', resource_types)
self.assertIn('alt', resource_types)
self.assertIn('admin', resource_types)
self.assertIn(['fake_operator'], resource_types)
self.assertIn(['fake_reseller'], resource_types)
- self.assertIn(['fake_owner', 'fake_operator'], resource_types)
for resource in resources:
self.assertIsNotNone(resource[1].network)
self.assertIsNotNone(resource[1].router)
@@ -257,7 +248,6 @@
self.opts)
self.mock_resource_creation()
cfg.CONF.set_default('swift', True, group='service_available')
- cfg.CONF.set_default('heat', True, group='service_available')
self.resources = account_generator.generate_resources(
self.cred_provider, admin=True)
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 810f9e5..8641b63 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -176,22 +176,6 @@
False, True)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
- def test_verify_keystone_api_versions_no_v2(self, mock_request):
- self.useFixture(fixtures.MockPatchObject(
- verify_tempest_config, '_get_unversioned_endpoint',
- return_value='http://fake_endpoint:5000'))
- fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
- fake_resp = json.dumps(fake_resp)
- mock_request.return_value = (None, fake_resp)
- fake_os = mock.MagicMock()
- with mock.patch.object(verify_tempest_config,
- 'print_and_or_update') as print_mock:
- verify_tempest_config.verify_keystone_api_versions(fake_os, True)
- print_mock.assert_called_once_with('api_v2',
- 'identity-feature-enabled',
- False, True)
-
- @mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 020818e..7cf87f8 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -183,7 +183,7 @@
# Build the expected params
expected_params = dict(
[(field, value) for _, field, value in all_params])
- expected_params.update(cf.DEFAULT_PARAMS)
+ expected_params.update(config.service_client_config())
admin_creds = cf.get_configured_admin_credentials()
mock_get_credentials.assert_called_once_with(
fill_in=True, identity_version='v3', **expected_params)
@@ -205,7 +205,7 @@
# Build the expected params
expected_params = dict(
[(field, value) for _, field, value in all_params])
- expected_params.update(cf.DEFAULT_PARAMS)
+ expected_params.update(config.service_client_config())
admin_creds = cf.get_configured_admin_credentials(
fill_in=False, identity_version='v3')
mock_get_credentials.assert_called_once_with(
@@ -232,7 +232,7 @@
cfg.CONF.set_default('uri', expected_uri, 'identity')
params = {'foo': 'bar'}
expected_params = params.copy()
- expected_params.update(cf.DEFAULT_PARAMS)
+ expected_params.update(config.service_client_config())
result = cf.get_credentials(identity_version='v2', **params)
self.assertEqual(expected_result, result)
mock_auth_get_credentials.assert_called_once_with(
@@ -251,7 +251,7 @@
params = {'foo': 'bar'}
expected_params = params.copy()
expected_params['domain_name'] = expected_domain
- expected_params.update(cf.DEFAULT_PARAMS)
+ expected_params.update(config.service_client_config())
result = cf.get_credentials(fill_in=False, identity_version='v3',
**params)
self.assertEqual(expected_result, result)
@@ -270,7 +270,7 @@
expected_domain, 'auth')
params = {'foo': 'bar', 'user_domain_name': expected_domain}
expected_params = params.copy()
- expected_params.update(cf.DEFAULT_PARAMS)
+ expected_params.update(config.service_client_config())
result = cf.get_credentials(fill_in=False, identity_version='v3',
**params)
self.assertEqual(expected_result, result)
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index ee63684..4a2fff4 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -39,11 +39,12 @@
self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
group='identity')
self.conf.set_default('neutron', True, group='service_available')
- self.conf.set_default('heat', True, group='service_available')
- if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
- os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
+ lock_path = str(os.environ.get('OS_TEST_LOCK_PATH',
+ os.environ.get('TMPDIR', '/tmp')))
+ if not os.path.exists(lock_path):
+ os.mkdir(lock_path)
lockutils.set_defaults(
- lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),
+ lock_path=lock_path,
)
self.conf.set_default('auth_version', 'v2', group='identity')
for config_option in ['username', 'password', 'project_name']:
diff --git a/tempest/tests/lib/cli/test_execute.py b/tempest/tests/lib/cli/test_execute.py
index 0130454..c069af5 100644
--- a/tempest/tests/lib/cli/test_execute.py
+++ b/tempest/tests/lib/cli/test_execute.py
@@ -91,3 +91,61 @@
self.assertEqual(mock_execute.call_count, 1)
self.assertEqual(mock_execute.call_args[1],
{'prefix': 'env LAC_ALL=C'})
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_domain_name(self, mock_execute):
+ cli = cli_base.CLIClient(
+ user_domain_name='default',
+ project_domain_name='default'
+ )
+ cli.glance('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-user-domain-name default',
+ mock_execute.call_args[0][2])
+ self.assertIn('--os-project-domain-name default',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-user-domain-id',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-project-domain-id',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_domain_id(self, mock_execute):
+ cli = cli_base.CLIClient(
+ user_domain_id='default',
+ project_domain_id='default'
+ )
+ cli.glance('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-user-domain-id default',
+ mock_execute.call_args[0][2])
+ self.assertIn('--os-project-domain-id default',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-user-domain-name',
+ mock_execute.call_args[0][2])
+ self.assertNotIn('--os-project-domain-name',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_default_api_version(self, mock_execute):
+ cli = cli_base.CLIClient()
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertNotIn('--os-identity-api-version ',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_empty_api_version(self, mock_execute):
+ cli = cli_base.CLIClient(identity_api_version='')
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertNotIn('--os-identity-api-version ',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_explicit_api_version(self, mock_execute):
+ cli = cli_base.CLIClient(identity_api_version='0.0')
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-identity-api-version 0.0 ',
+ mock_execute.call_args[0][2])
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 9b10159..25df2a7 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -339,7 +339,7 @@
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
- with mock.patch('tempest.lib.services.compute.networks_client.'
+ with mock.patch('tempest.lib.services.network.networks_client.'
'NetworksClient.list_networks',
return_value={'networks': [{'name': 'network-2',
'id': 'fake-id',
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index 8bdf70e..b8385b2 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
-
from tempest.lib.common.utils import data_utils
from tempest.tests import base
@@ -81,7 +79,11 @@
self.assertEqual(len(actual), 3)
self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
actual2 = data_utils.rand_password(2)
- self.assertNotEqual(actual, actual2)
+ # NOTE(masayukig): Originally, we checked that actual and actual2
+ # differ from each other. But two 3-character passwords can be the
+ # same value in a very rare case. So, we just check the length here,
+ # too, just in case.
+ self.assertEqual(len(actual2), 3)
def test_rand_url(self):
actual = data_utils.rand_url()
@@ -137,43 +139,6 @@
actual = data_utils.random_bytes(size=2048)
self.assertEqual(2048, len(actual))
- def test_get_ipv6_addr_by_EUI64(self):
- actual = data_utils.get_ipv6_addr_by_EUI64('2001:db8::',
- '00:16:3e:33:44:55')
- self.assertIsInstance(actual, netaddr.IPAddress)
- self.assertEqual(actual,
- netaddr.IPAddress('2001:db8::216:3eff:fe33:4455'))
-
- def test_get_ipv6_addr_by_EUI64_with_IPv4_prefix(self):
- ipv4_prefix = '10.0.8'
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- ipv4_prefix, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_cidr_type(self):
- bad_cidr = 123
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- bad_cidr, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_cidr_value(self):
- bad_cidr = 'bb'
- mac = '00:16:3e:33:44:55'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- bad_cidr, mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_mac_value(self):
- cidr = '2001:db8::'
- bad_mac = '00:16:3e:33:44:5Z'
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- cidr, bad_mac)
-
- def test_get_ipv6_addr_by_EUI64_bad_mac_type(self):
- cidr = '2001:db8::'
- bad_mac = 99999999999999999999
- self.assertRaises(TypeError, data_utils.get_ipv6_addr_by_EUI64,
- cidr, bad_mac)
-
def test_chunkify(self):
data = "aaa"
chunks = data_utils.chunkify(data, 2)
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index 29c5684..f638ba6 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -81,11 +81,13 @@
@mock.patch('time.sleep')
@mock.patch('time.time')
def test_call_until_true_when_f_never_returns_true(self, m_time, m_sleep):
+ def set_value(bool_value):
+ return bool_value
timeout = 42 # The value doesn't matter as we mock time.time()
sleep = 60 # The value doesn't matter as we mock time.sleep()
m_time.side_effect = utils.generate_timeout_series(timeout)
self.assertEqual(
- False, test_utils.call_until_true(lambda: False, timeout, sleep)
+ False, test_utils.call_until_true(set_value, timeout, sleep, False)
)
m_sleep.call_args_list = [mock.call(sleep)] * 2
m_time.call_args_list = [mock.call()] * 2
@@ -93,11 +95,30 @@
@mock.patch('time.sleep')
@mock.patch('time.time')
def test_call_until_true_when_f_returns_true(self, m_time, m_sleep):
+ def set_value(bool_value=False):
+ return bool_value
timeout = 42 # The value doesn't matter as we mock time.time()
sleep = 60 # The value doesn't matter as we mock time.sleep()
m_time.return_value = 0
self.assertEqual(
- True, test_utils.call_until_true(lambda: True, timeout, sleep)
+ True, test_utils.call_until_true(set_value, timeout, sleep,
+ bool_value=True)
)
self.assertEqual(0, m_sleep.call_count)
- self.assertEqual(1, m_time.call_count)
+ # call_until_true logs the elapsed time, so time.time() is called twice.
+ self.assertEqual(2, m_time.call_count)
+
+ @mock.patch('time.sleep')
+ @mock.patch('time.time')
+ def test_call_until_true_when_f_returns_true_no_param(
+ self, m_time, m_sleep):
+ def set_value(bool_value=False):
+ return bool_value
+ timeout = 42 # The value doesn't matter as we mock time.time()
+ sleep = 60 # The value doesn't matter as we mock time.sleep()
+ m_time.side_effect = utils.generate_timeout_series(timeout)
+ self.assertEqual(
+ False, test_utils.call_until_true(set_value, timeout, sleep)
+ )
+ m_sleep.call_args_list = [mock.call(sleep)] * 2
+ m_time.call_args_list = [mock.call()] * 2
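
The updated tests above exercise call_until_true forwarding extra positional and keyword arguments to the predicate. A hedged usage sketch (ports_client and port_id are placeholders; the assumed signature is call_until_true(func, duration, sleep_for, *args, **kwargs)):

    from tempest.lib.common.utils import test_utils

    def port_is_active(client, port_id):
        return client.show_port(port_id)['port']['status'] == 'ACTIVE'

    # extra args after duration/sleep_for go straight to the predicate,
    # so no lambda or functools.partial wrapper is needed
    test_utils.call_until_true(port_is_active, 60, 5, ports_client, port_id)
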
diff --git a/tempest/tests/lib/services/identity/v3/test_identity_client.py b/tempest/tests/lib/services/identity/v3/test_identity_client.py
index 6572947..3739fe6 100644
--- a/tempest/tests/lib/services/identity/v3/test_identity_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_identity_client.py
@@ -60,6 +60,34 @@
}
}
+ FAKE_AUTH_DOMAINS = {
+ "domains": [
+ {
+ "description": "my domain description",
+ "enabled": True,
+ "id": "1789d1",
+ "links": {
+ "self": "https://example.com/identity/v3/domains/1789d1"
+ },
+ "name": "my domain"
+ },
+ {
+ "description": "description of my other domain",
+ "enabled": True,
+ "id": "43e8da",
+ "links": {
+ "self": "https://example.com/identity/v3/domains/43e8da"
+ },
+ "name": "another domain"
+ }
+ ],
+ "links": {
+ "self": "https://example.com/identity/v3/auth/domains",
+ "previous": None,
+ "next": None
+ }
+ }
+
def setUp(self):
super(TestIdentityClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -89,6 +117,13 @@
self.FAKE_AUTH_PROJECTS,
bytes_body)
+ def _test_list_auth_domains(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_auth_domains,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_AUTH_DOMAINS,
+ bytes_body)
+
def test_show_api_description_with_str_body(self):
self._test_show_api_description()
@@ -122,3 +157,9 @@
def test_list_auth_projects_with_bytes_body(self):
self._test_list_auth_projects(bytes_body=True)
+
+ def test_list_auth_domains_with_str_body(self):
+ self._test_list_auth_domains()
+
+ def test_list_auth_domains_with_bytes_body(self):
+ self._test_list_auth_domains(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
new file mode 100644
index 0000000..078f4b0
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -0,0 +1,242 @@
+# Copyright 2017 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import networks_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestNetworksClient(base.BaseServiceTest):
+
+ FAKE_NETWORKS = {
+ "networks": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "mtu": 0,
+ "name": "net1",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": True,
+ "description": ""
+ },
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "mtu": 0,
+ "name": "net2",
+ "port_security_enabled": True,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "08eae331-0402-425a-923c-34f7cfe39c1b"
+ ],
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": False,
+ "description": ""
+ }
+ ]
+ }
+
+ FAKE_NETWORK_ID = "d32019d3-bc6e-4319-9c1d-6722fc136a22"
+
+ FAKE_NETWORK1 = {
+ "name": "net1",
+ "admin_state_up": True,
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e"
+ }
+
+ FAKE_NETWORK2 = {
+ "name": "net2",
+ "admin_state_up": True,
+ "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819"
+ }
+
+ FAKE_NETWORKS_REQ = {
+ "networks": [
+ FAKE_NETWORK1,
+ FAKE_NETWORK2
+ ]
+ }
+
+ FAKE_DHCP_AGENT_NETWORK_ID = "80515c45-651f-4f9a-b82b-2ca8a7301a8d"
+
+ FAKE_DHCP_AGENTS = {
+ "agents": [
+ {
+ "binary": "neutron-dhcp-agent",
+ "description": None,
+ "admin_state_up": True,
+ "heartbeat_timestamp": "2017-06-22 18:29:50",
+ "availability_zone": "nova",
+ "alive": True,
+ "topic": "dhcp_agent",
+ "host": "osboxes",
+ "agent_type": "DHCP agent",
+ "resource_versions": {},
+ "created_at": "2017-06-19 21:39:51",
+ "started_at": "2017-06-19 21:39:51",
+ "id": "b6cfb7a1-6ac4-4980-993c-9d295d37062e",
+ "configurations": {
+ "subnets": 2,
+ "dhcp_lease_duration": 86400,
+ "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
+ "networks": 1,
+ "log_agent_heartbeats": False,
+ "ports": 3
+ }
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestNetworksClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.networks_client = networks_client.NetworksClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_list_networks(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.list_networks,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_NETWORKS,
+ bytes_body,
+ 200)
+
+ def _test_create_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.create_network,
+ "tempest.lib.common.rest_client.RestClient.post",
+ {"network": self.FAKE_NETWORKS["networks"][0]},
+ bytes_body,
+ 201,
+ **self.FAKE_NETWORK1)
+
+ def _test_create_bulk_networks(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.create_bulk_networks,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_NETWORKS,
+ bytes_body,
+ 201,
+ networks=self.FAKE_NETWORKS_REQ)
+
+ def _test_show_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.show_network,
+ "tempest.lib.common.rest_client.RestClient.get",
+ {"network": self.FAKE_NETWORKS["networks"][0]},
+ bytes_body,
+ 200,
+ network_id=self.FAKE_NETWORK_ID)
+
+ def _test_update_network(self, bytes_body=False):
+ update_kwargs = {
+ "name": "sample_network_5_updated",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e"
+ }
+
+ resp_body = {
+ "network": copy.deepcopy(
+ self.FAKE_NETWORKS["networks"][0]
+ )
+ }
+ resp_body["network"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.networks_client.update_network,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ network_id=self.FAKE_NETWORK_ID,
+ **update_kwargs)
+
+ def _test_list_dhcp_agents_on_hosting_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.list_dhcp_agents_on_hosting_network,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_DHCP_AGENTS,
+ bytes_body,
+ 200,
+ network_id=self.FAKE_DHCP_AGENT_NETWORK_ID)
+
+ def test_delete_network(self):
+ self.check_service_client_function(
+ self.networks_client.delete_network,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ network_id=self.FAKE_NETWORK_ID)
+
+ def test_list_networks_with_str_body(self):
+ self._test_list_networks()
+
+ def test_list_networks_with_bytes_body(self):
+ self._test_list_networks(bytes_body=True)
+
+ def test_create_network_with_str_body(self):
+ self._test_create_network()
+
+ def test_create_network_with_bytes_body(self):
+ self._test_create_network(bytes_body=True)
+
+ def test_create_bulk_network_with_str_body(self):
+ self._test_create_bulk_networks()
+
+ def test_create_bulk_network_with_bytes_body(self):
+ self._test_create_bulk_networks(bytes_body=True)
+
+ def test_show_network_with_str_body(self):
+ self._test_show_network()
+
+ def test_show_network_with_bytes_body(self):
+ self._test_show_network(bytes_body=True)
+
+ def test_update_network_with_str_body(self):
+ self._test_update_network()
+
+ def test_update_network_with_bytes_body(self):
+ self._test_update_network(bytes_body=True)
+
+ def test_list_dhcp_agents_on_hosting_network_with_str_body(self):
+ self._test_list_dhcp_agents_on_hosting_network()
+
+ def test_list_dhcp_agents_on_hosting_network_with_bytes_body(self):
+ self._test_list_dhcp_agents_on_hosting_network(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_quotas_client.py b/tempest/tests/lib/services/network/test_quotas_client.py
index e76bc9c..aa6c1a1 100644
--- a/tempest/tests/lib/services/network/test_quotas_client.py
+++ b/tempest/tests/lib/services/network/test_quotas_client.py
@@ -38,8 +38,62 @@
]
}
+ FAKE_PROJECT_QUOTAS = {
+ "quota": {
+ "floatingip": 50,
+ "network": 10,
+ "port": 50,
+ "rbac_policy": -1,
+ "router": 10,
+ "security_group": 10,
+ "security_group_rule": 100,
+ "subnet": 10,
+ "subnetpool": -1
+ }
+ }
+
FAKE_QUOTA_TENANT_ID = "bab7d5c60cd041a0a36f7c4b6e1dd978"
+ FAKE_QUOTA_DETAILS = {
+ "quota": {
+ "rbac_policy": {
+ "used": 4,
+ "limit": 10,
+ "reserved": 0
+ },
+ "subnetpool": {
+ "used": 2,
+ "limit": -1,
+ "reserved": 0
+ },
+ "security_group_rule": {
+ "used": 10,
+ "limit": 100,
+ "reserved": 1
+ },
+ "security_group": {
+ "used": 3,
+ "limit": 10,
+ "reserved": 0
+ },
+ "subnet": {
+ "used": 3,
+ "limit": 100,
+ "reserved": 0
+ },
+ "port": {
+ "used": 21,
+ "limit": 500,
+ "reserved": 3
+ },
+ "network": {
+ "used": 9,
+ "limit": 100,
+ "reserved": 2
+ }
+ }
+ }
+
def setUp(self):
super(TestQuotasClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -58,7 +112,16 @@
self.check_service_client_function(
self.quotas_client.show_quotas,
"tempest.lib.common.rest_client.RestClient.get",
- {"quota": self.FAKE_QUOTAS["quotas"][0]},
+ self.FAKE_PROJECT_QUOTAS,
+ bytes_body,
+ 200,
+ tenant_id=self.FAKE_QUOTA_TENANT_ID)
+
+ def _test_show_default_quotas(self, bytes_body=False):
+ self.check_service_client_function(
+ self.quotas_client.show_default_quotas,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_PROJECT_QUOTAS,
bytes_body,
200,
tenant_id=self.FAKE_QUOTA_TENANT_ID)
@@ -67,7 +130,16 @@
self.check_service_client_function(
self.quotas_client.update_quotas,
"tempest.lib.common.rest_client.RestClient.put",
- {"quota": self.FAKE_QUOTAS["quotas"][0]},
+ self.FAKE_PROJECT_QUOTAS,
+ bytes_body,
+ 200,
+ tenant_id=self.FAKE_QUOTA_TENANT_ID)
+
+ def _test_show_quota_details(self, bytes_body=False):
+ self.check_service_client_function(
+ self.quotas_client.show_quota_details,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_QUOTA_DETAILS,
bytes_body,
200,
tenant_id=self.FAKE_QUOTA_TENANT_ID)
@@ -92,8 +164,20 @@
def test_show_quotas_with_bytes_body(self):
self._test_show_quotas(bytes_body=True)
+ def test_show_default_quotas_with_str_body(self):
+ self._test_show_default_quotas()
+
+ def test_show_default_quotas_with_bytes_body(self):
+ self._test_show_default_quotas(bytes_body=True)
+
def test_update_quotas_with_str_body(self):
self._test_update_quotas()
def test_update_quotas_with_bytes_body(self):
self._test_update_quotas(bytes_body=True)
+
+ def test_show_quota_details_with_str_body(self):
+ self._test_show_quota_details()
+
+ def test_show_quota_details_with_bytes_body(self):
+ self._test_show_quota_details(bytes_body=True)
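
A short usage sketch contrasting the two quota calls covered above, based on the fake payloads in this test (the tenant id and client variable are placeholders):

    quotas = quotas_client.show_quotas(tenant_id)['quota']
    quotas['port']        # plain limit, e.g. 50

    details = quotas_client.show_quota_details(tenant_id)['quota']
    details['port']       # per-resource detail, e.g.
                          # {'used': 21, 'limit': 500, 'reserved': 3}
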
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
new file mode 100644
index 0000000..a16d1d7
--- /dev/null
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -0,0 +1,108 @@
+# Copyright 2016 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from tempest.lib import exceptions
+from tempest.lib.services.object_storage import object_client
+from tempest.tests import base
+from tempest.tests.lib import fake_auth_provider
+
+
+class TestObjectClient(base.TestCase):
+
+ def setUp(self):
+ super(TestObjectClient, self).setUp()
+ self.fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.url = self.fake_auth.base_url(None)
+ self.object_client = object_client.ObjectClient(self.fake_auth,
+ 'swift', 'region1')
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_object_continue_no_data(self, mock_poc):
+ self._validate_create_object_continue(None, mock_poc)
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_object_continue_with_data(self, mock_poc):
+ self._validate_create_object_continue('hello', mock_poc)
+
+ @mock.patch.object(object_client, '_create_connection')
+ def test_create_continue_with_no_continue_received(self, mock_poc):
+ self._validate_create_object_continue('hello', mock_poc,
+ initial_status=201)
+
+ def _validate_create_object_continue(self, req_data,
+ mock_poc, initial_status=100):
+
+ expected_hdrs = {
+ 'X-Auth-Token': self.fake_auth.get_token(),
+ 'content-length': 0 if req_data is None else len(req_data),
+ 'Expect': '100-continue'}
+
+ # Setup the Mocks prior to invoking the object creation
+ mock_resp_cls = mock.Mock()
+ mock_resp_cls._read_status.return_value = ("1", initial_status, "OK")
+
+ mock_poc.return_value.response_class.return_value = mock_resp_cls
+
+ # This is the final expected return value
+ mock_poc.return_value.getresponse.return_value.status = 201
+ mock_poc.return_value.getresponse.return_value.reason = 'OK'
+
+ # Call method to PUT object using expect:100-continue
+ cnt = "container1"
+ obj = "object1"
+ path = "/%s/%s" % (cnt, obj)
+
+ # If the expected initial status is not 100, then an exception
+ # should be thrown and the connection closed
+ if initial_status == 100:
+ status, reason = \
+ self.object_client.create_object_continue(cnt, obj, req_data)
+ else:
+ self.assertRaises(exceptions.UnexpectedResponseCode,
+ self.object_client.create_object_continue, cnt,
+ obj, req_data)
+ mock_poc.return_value.close.assert_called_once_with()
+
+ # Verify that putrequest is called 1 time with the appropriate values
+ mock_poc.return_value.putrequest.assert_called_once_with('PUT', path)
+
+ # Verify that headers were written, including "Expect:100-continue"
+ calls = []
+
+ for header, value in expected_hdrs.items():
+ calls.append(mock.call(header, value))
+
+ mock_poc.return_value.putheader.assert_has_calls(calls, False)
+ mock_poc.return_value.endheaders.assert_called_once_with()
+
+ # The following steps are only taken if the initial status is 100
+ if initial_status == 100:
+ # Verify that the method returned what it was supposed to
+ self.assertEqual(status, 201)
+
+ # Verify that _safe_read was called once to remove the CRLF
+ # after the 100 response
+ mock_rc = mock_poc.return_value.response_class.return_value
+ mock_rc._safe_read.assert_called_once_with(2)
+
+ # Verify the actual data was written via send
+ mock_poc.return_value.send.assert_called_once_with(req_data)
+
+ # Verify that the getresponse method was called to receive
+ # the final response
+ mock_poc.return_value.getresponse.assert_called_once_with()
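
For context on what _validate_create_object_continue mocks out, a minimal sketch of an Expect: 100-continue PUT using plain http.client (host, path and token are placeholders):

    import http.client

    conn = http.client.HTTPConnection('swift.example.com')
    conn.putrequest('PUT', '/v1/AUTH_test/container1/object1')
    conn.putheader('X-Auth-Token', '<token>')
    conn.putheader('content-length', '5')
    conn.putheader('Expect', '100-continue')
    conn.endheaders()
    # A conforming client waits for the interim "100 Continue" status line
    # before sending the body; http.client does not do that wait for you,
    # which is why tempest's object client reads the status line itself
    # (the behaviour the mocked _read_status stands in for).
    conn.send(b'hello')
    resp = conn.getresponse()
    print(resp.status, resp.reason)
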
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
index 8484209..1da2112 100644
--- a/tempest/tests/lib/services/registry_fixture.py
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -38,7 +38,7 @@
"""Initialise the registry fixture"""
self.services = set(['compute', 'identity.v2', 'identity.v3',
'image.v1', 'image.v2', 'network', 'volume.v1',
- 'volume.v2', 'volume.v3'])
+ 'volume.v2', 'volume.v3', 'object-storage'])
def _setUp(self):
# Cleanup the registry
@@ -50,7 +50,7 @@
for sc in self.services:
sc_module = service_clients[sc]
sc_unversioned = sc.split('.')[0]
- sc_name = sc.replace('.', '_')
+ sc_name = sc.replace('.', '_').replace('-', '_')
# Pass the bare minimum params to satisfy the client's interface
service_client_data = dict(
name=sc_name, service_version=sc, service=sc_unversioned,
diff --git a/tempest/tests/lib/services/test_clients.py b/tempest/tests/lib/services/test_clients.py
index 6d0f27a..43fd88f 100644
--- a/tempest/tests/lib/services/test_clients.py
+++ b/tempest/tests/lib/services/test_clients.py
@@ -189,9 +189,7 @@
def setUp(self):
super(TestServiceClients, self).setUp()
self.useFixture(fixtures.MockPatch(
- 'tempest.lib.services.clients.tempest_modules', return_value={}))
- self.useFixture(fixtures.MockPatch(
- 'tempest.lib.services.clients._tempest_internal_modules',
+ 'tempest.lib.services.clients.tempest_modules',
return_value=set(['fake_service1'])))
def test___init___creds_v2_uri(self):
@@ -416,6 +414,7 @@
_manager = self._get_manager()
duplicate_service = 'fake_service1'
expected_error = '.*' + duplicate_service
+ _manager._registered_services = [duplicate_service]
with testtools.ExpectedException(
exceptions.ServiceClientRegistrationException, expected_error):
_manager.register_service_client_module(
diff --git a/tempest/tests/lib/services/volume/v2/test_encryption_types_client.py b/tempest/tests/lib/services/volume/v2/test_encryption_types_client.py
index d029091..8de9fb4 100644
--- a/tempest/tests/lib/services/volume/v2/test_encryption_types_client.py
+++ b/tempest/tests/lib/services/volume/v2/test_encryption_types_client.py
@@ -43,6 +43,10 @@
}
}
+ FAKE_ENCRYPTION_SPECS_ITEM = {
+ "cipher": "aes-xts-plain64"
+ }
+
def setUp(self):
super(TestEncryptionTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -65,6 +69,13 @@
self.FAKE_INFO_ENCRYPTION_TYPE,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
+ def _test_show_encryption_specs_item(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_encryption_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ENCRYPTION_SPECS_ITEM,
+ bytes_body, volume_type_id="cbc36478b0bd8e67e89", key="cipher")
+
def test_create_encryption_type_with_str_body(self):
self._test_create_encryption()
@@ -77,6 +88,12 @@
def test_show_encryption_type_with_bytes_body(self):
self._test_show_encryption_type(bytes_body=True)
+ def test_show_encryption_specs_item_with_str_body(self):
+ self._test_show_encryption_specs_item()
+
+ def test_show_encryption_specs_item_with_bytes_body(self):
+ self._test_show_encryption_specs_item(bytes_body=True)
+
def test_delete_encryption_type(self):
self.check_service_client_function(
self.client.delete_encryption_type,
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index 0f456a2..c60cc36 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.lib.services.volume.v3 import group_types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -67,6 +69,28 @@
]
}
+ FAKE_CREATE_GROUP_TYPE_SPECS = {
+ "group_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ }
+
+ FAKE_LIST_GROUP_TYPE_SPECS = {
+ "group_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ }
+
+ FAKE_SHOW_GROUP_TYPE_SPECS_ITEM = {
+ "key1": "value1"
+ }
+
+ FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM = {
+ "key2": "value2-updated"
+ }
+
def setUp(self):
super(TestGroupTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -97,6 +121,57 @@
self.FAKE_LIST_GROUP_TYPES,
bytes_body)
+ def _test_update_group_types(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_INFO_GROUP_TYPE)
+ resp_body['group_type'].pop('created_at')
+
+ self.check_service_client_function(
+ self.client.update_group_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ name='updated-group-type-name')
+
+ def _test_create_or_update_group_type_specs(self, bytes_body=False):
+ group_specs = self.FAKE_CREATE_GROUP_TYPE_SPECS['group_specs']
+ self.check_service_client_function(
+ self.client.create_or_update_group_type_specs,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_GROUP_TYPE_SPECS,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ group_specs=group_specs,
+ status=202)
+
+ def _test_list_group_type_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_group_type_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_GROUP_TYPE_SPECS,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+
+ def _test_show_group_type_specs_item(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_GROUP_TYPE_SPECS_ITEM,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ spec_id="key1")
+
+ def _test_update_group_type_specs_item(self, bytes_body=False):
+ spec = self.FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM
+ self.check_service_client_function(
+ self.client.update_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ spec_id="key2",
+ spec=spec)
+
def test_create_group_type_with_str_body(self):
self._test_create_group_type()
@@ -122,3 +197,42 @@
def test_list_group_types_with_bytes_body(self):
self._test_list_group_types(bytes_body=True)
+
+ def test_update_group_types_with_str_body(self):
+ self._test_update_group_types()
+
+ def test_update_group_types_with_bytes_body(self):
+ self._test_update_group_types(bytes_body=True)
+
+ def test_create_or_update_group_type_specs_with_str_body(self):
+ self._test_create_or_update_group_type_specs()
+
+ def test_create_or_update_group_type_specs_with_bytes_body(self):
+ self._test_create_or_update_group_type_specs(bytes_body=True)
+
+ def test_list_group_type_specs_with_str_body(self):
+ self._test_list_group_type_specs()
+
+ def test_list_group_type_specs_with_bytes_body(self):
+ self._test_list_group_type_specs(bytes_body=True)
+
+ def test_show_group_type_specs_item_with_str_body(self):
+ self._test_show_group_type_specs_item()
+
+ def test_show_group_type_specs_item_with_bytes_body(self):
+ self._test_show_group_type_specs_item(bytes_body=True)
+
+ def test_update_group_type_specs_item_with_str_body(self):
+ self._test_update_group_type_specs_item()
+
+ def test_update_group_type_specs_item_with_bytes_body(self):
+ self._test_update_group_type_specs_item(bytes_body=True)
+
+ def test_delete_group_type_specs_item(self):
+ self.check_service_client_function(
+ self.client.delete_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ group_type_id='0e58433f-d108-4bf3-a22c-34e6b71ef86b',
+ spec_id='key1',
+ status=202)
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index bbebcd3..ed0eea3 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -125,35 +125,6 @@
self.assertRaises(ValueError, self._test_helper, _id)
-class TestSkipUnlessAttrDecorator(base.TestCase):
- def _test_skip_unless_attr(self, attr, expected_to_skip=True):
- class TestFoo(test.BaseTestCase):
- expected_attr = not expected_to_skip
-
- @decorators.skip_unless_attr(attr)
- def test_foo(self):
- pass
-
- t = TestFoo('test_foo')
- if expected_to_skip:
- self.assertRaises(testtools.TestCase.skipException,
- t.test_foo)
- else:
- try:
- t.test_foo()
- except Exception:
- raise testtools.TestCase.failureException()
-
- def test_skip_attr_does_not_exist(self):
- self._test_skip_unless_attr('unexpected_attr')
-
- def test_skip_attr_false(self):
- self._test_skip_unless_attr('expected_attr')
-
- def test_no_skip_for_attr_exist_and_true(self):
- self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
-
-
class TestRelatedBugDecorator(base.TestCase):
def test_relatedbug_when_no_exception(self):
f = mock.Mock()
diff --git a/tempest/tests/services/object_storage/__init__.py b/tempest/tests/services/object_storage/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/tests/services/object_storage/__init__.py
+++ /dev/null
diff --git a/tempest/tests/services/object_storage/test_object_client.py b/tempest/tests/services/object_storage/test_object_client.py
index 748614c..86535f9 100644
--- a/tempest/tests/services/object_storage/test_object_client.py
+++ b/tempest/tests/services/object_storage/test_object_client.py
@@ -31,15 +31,15 @@
self.object_client = object_client.ObjectClient(self.fake_auth,
'swift', 'region1')
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_object_continue_no_data(self, mock_poc):
self._validate_create_object_continue(None, mock_poc)
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_object_continue_with_data(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc)
- @mock.patch.object(object_client, 'create_connection')
+ @mock.patch.object(object_client, '_create_connection')
def test_create_continue_with_no_continue_received(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc,
initial_status=201)
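The rename above only changes the patch target from create_connection to
_create_connection; the mechanics are plain mock.patch.object, which swaps an
attribute on the given module for the duration of the test. A standalone
sketch of that behaviour (module and helper names are invented here)::

    import types
    from unittest import mock

    # Fake "client module" with a private connection helper, mirroring the
    # shape of the patched target.
    object_client = types.ModuleType('object_client')

    def _real_create_connection(url):
        raise RuntimeError('would open a real socket')

    object_client._create_connection = _real_create_connection


    @mock.patch.object(object_client, '_create_connection')
    def exercise(mock_conn):
        mock_conn.return_value = 'fake-connection'
        # Code that looks the helper up on the module now gets the mock.
        return object_client._create_connection('http://example')


    assert exercise() == 'fake-connection'
    # Outside the decorated call the original helper is restored.
    assert object_client._create_connection is _real_create_connection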
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
index 3ece11d..2b5a947 100644
--- a/tempest/tests/test_base_test.py
+++ b/tempest/tests/test_base_test.py
@@ -17,6 +17,7 @@
from tempest import clients
from tempest.common import credentials_factory as credentials
+from tempest import config
from tempest.lib.common import fixed_network
from tempest import test
from tempest.tests import base
@@ -27,6 +28,8 @@
def setUp(self):
super(TestBaseTestCase, self).setUp()
self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
self.fixed_network_name = 'fixed-net'
cfg.CONF.set_default('fixed_network_name', self.fixed_network_name,
'compute')
@@ -38,7 +41,7 @@
def test_get_tenant_network(self, mock_gtn, mock_gprov, mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
test.BaseTestCase.get_tenant_network()
@@ -82,7 +85,7 @@
mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
test.BaseTestCase.get_tenant_network(credentials_type='alt')
@@ -99,7 +102,7 @@
mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
creds = ['foo_type', 'role1']
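The three hunks above follow a rename in the production code
(compute_networks_client -> networks_client); the tests only pass because
whatever is hung off mock_gcm.return_value is exactly what the code under
test sees after calling the patched factory. In isolation (hypothetical
names), for example::

    from unittest import mock

    # Stand-in for the patched get_client_manager factory.
    get_client_manager = mock.Mock()
    net_client = mock.Mock()
    get_client_manager.return_value.networks_client = net_client

    # What the code under test effectively does:
    manager = get_client_manager()
    assert manager.networks_client is net_client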
diff --git a/tempest/tests/test_imports.py b/tempest/tests/test_imports.py
new file mode 100644
index 0000000..6f1cfca
--- /dev/null
+++ b/tempest/tests/test_imports.py
@@ -0,0 +1,69 @@
+# Copyright 2017 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from tempest.tests import base
+
+
+class ConfCounter(object):
+
+ def __init__(self, *args, **kwargs):
+ self.count = 0
+
+ def __getattr__(self, key):
+ self.count += 1
+ return mock.MagicMock()
+
+ def get_counts(self):
+ return self.count
+
+
+class TestImports(base.TestCase):
+ def setUp(self):
+ super(TestImports, self).setUp()
+ self.conf_mock = self.patch('tempest.config.CONF',
+ new_callable=ConfCounter)
+
+ def test_account_generator_command_import(self):
+ from tempest.cmd import account_generator # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_cleanup_command_import(self):
+ from tempest.cmd import cleanup # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_init_command_import(self):
+ from tempest.cmd import init # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_list_plugins_command_import(self):
+ from tempest.cmd import list_plugins # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_run_command_import(self):
+ from tempest.cmd import run # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_subunit_describe_command_import(self):
+ from tempest.cmd import subunit_describe_calls # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_verify_tempest_config_command_import(self):
+ from tempest.cmd import verify_tempest_config # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
+
+ def test_workspace_command_import(self):
+ from tempest.cmd import workspace # noqa
+ self.assertEqual(0, self.conf_mock.get_counts())
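The new ConfCounter above relies on __getattr__ only firing for attribute
names that are not already defined, so the explicitly defined get_counts()
helper is free while any CONF-style option lookup during an import bumps the
counter. A stripped-down demonstration of that mechanism::

    class Counter(object):
        def __init__(self):
            self.count = 0

        def __getattr__(self, key):
            # Only reached for attributes that do not already exist.
            self.count += 1
            return None

        def get_counts(self):
            return self.count


    c = Counter()
    c.some_option         # missing attribute -> __getattr__ -> count == 1
    c.another_option      # count == 2
    assert c.get_counts() == 2   # defined method, __getattr__ not involved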
diff --git a/test-requirements.txt b/test-requirements.txt
index 37644d0..e33f207 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,11 +2,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
-# needed for doc build
-sphinx>=1.6.2 # BSD
-openstackdocstheme>=1.17.0 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
-oslotest>=1.10.0 # Apache-2.0
+oslotest>=3.2.0 # Apache-2.0
flake8-import-order==0.11 # LGPLv3
diff --git a/tools/check_logs.py b/tools/check_logs.py
index fc21f75..b80ccc0 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -62,7 +62,7 @@
for (name, filename) in file_specs:
whitelist = whitelists.get(name, [])
with open(filename) as content:
- if scan_content(name, content, regexp, whitelist):
+ if scan_content(content, regexp, whitelist):
logs_with_errors.append(name)
for (name, url) in url_specs:
whitelist = whitelists.get(name, [])
@@ -71,12 +71,12 @@
page = urlreq.urlopen(req)
buf = six.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
- if scan_content(name, f.read().splitlines(), regexp, whitelist):
+ if scan_content(f.read().splitlines(), regexp, whitelist):
logs_with_errors.append(name)
return logs_with_errors
-def scan_content(name, content, regexp, whitelist):
+def scan_content(content, regexp, whitelist):
had_errors = False
for line in content:
if not line.startswith("Stderr:") and regexp.match(line):
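After the signature change, scan_content() no longer takes the log name;
callers pass only the content iterable, the compiled regexp and the
whitelist. A rough, self-contained approximation (the whitelist handling of
the real helper is omitted here)::

    import re

    def scan_content(content, regexp, whitelist):
        # Simplified: the real helper also consults the whitelist entries.
        for line in content:
            if not line.startswith("Stderr:") and regexp.match(line):
                return True
        return False


    error_re = re.compile(r".* (?:ERROR|TRACE) .*")
    lines = [
        "2017-01-01 12:00:00.000 INFO nova.compute all good",
        "2017-01-01 12:00:01.000 ERROR nova.compute something broke",
    ]
    assert scan_content(lines, error_re, whitelist=[])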
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
deleted file mode 100755
index 1f2b88b..0000000
--- a/tools/find_stack_traces.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import gzip
-import pprint
-import re
-import sys
-
-import six
-import six.moves.urllib.request as urlreq
-
-
-pp = pprint.PrettyPrinter()
-
-NOVA_TIMESTAMP = r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d"
-
-NOVA_REGEX = r"(?P<timestamp>%s) (?P<pid>\d+ )?(?P<level>(ERROR|TRACE)) " \
- "(?P<module>[\w\.]+) (?P<msg>.*)" % (NOVA_TIMESTAMP)
-
-
-class StackTrace(object):
- timestamp = None
- pid = None
- level = ""
- module = ""
- msg = ""
-
- def __init__(self, timestamp=None, pid=None, level="", module="",
- msg=""):
- self.timestamp = timestamp
- self.pid = pid
- self.level = level
- self.module = module
- self.msg = msg
-
- def append(self, msg):
- self.msg = self.msg + msg
-
- def is_same(self, data):
- return (data['timestamp'] == self.timestamp and
- data['level'] == self.level)
-
- def not_none(self):
- return self.timestamp is not None
-
- def __str__(self):
- buff = "<%s %s %s>\n" % (self.timestamp, self.level, self.module)
- for line in self.msg.splitlines():
- buff = buff + line + "\n"
- return buff
-
-
-def hunt_for_stacktrace(url):
- """Return TRACE or ERROR lines out of logs."""
- req = urlreq.Request(url)
- req.add_header('Accept-Encoding', 'gzip')
- page = urlreq.urlopen(req)
- buf = six.StringIO(page.read())
- f = gzip.GzipFile(fileobj=buf)
- content = f.read()
-
- traces = []
- trace = StackTrace()
- for line in content.splitlines():
- m = re.match(NOVA_REGEX, line)
- if m:
- data = m.groupdict()
- if trace.not_none() and trace.is_same(data):
- trace.append(data['msg'] + "\n")
- else:
- trace = StackTrace(
- timestamp=data.get('timestamp'),
- pid=data.get('pid'),
- level=data.get('level'),
- module=data.get('module'),
- msg=data.get('msg'))
-
- else:
- if trace.not_none():
- traces.append(trace)
- trace = StackTrace()
-
- # once more at the end to pick up any stragglers
- if trace.not_none():
- traces.append(trace)
-
- return traces
-
-
-def log_url(url, log):
- return "%s/%s" % (url, log)
-
-
-def collect_logs(url):
- page = urlreq.urlopen(url)
- content = page.read()
- logs = re.findall('(screen-[\w-]+\.txt\.gz)</a>', content)
- return logs
-
-
-def usage():
- print("""
-Usage: find_stack_traces.py <logurl>
-
-Hunts for stack traces in a devstack run. Must provide it a base log url
-from a tempest devstack run. Should start with http and end with /logs/.
-
-Returns a report listing stack traces out of the various files where
-they are found.
-""")
- sys.exit(0)
-
-
-def print_stats(items, fname, verbose=False):
- errors = len([x for x in items if x.level == "ERROR"])
- traces = len([x for x in items if x.level == "TRACE"])
- print("%d ERRORS found in %s" % (errors, fname))
- print("%d TRACES found in %s" % (traces, fname))
-
- if verbose:
- for item in items:
- print(item)
- print("\n\n")
-
-
-def main():
- if len(sys.argv) == 2:
- url = sys.argv[1]
- loglist = collect_logs(url)
-
- # probably wrong base url
- if not loglist:
- usage()
-
- for log in loglist:
- logurl = log_url(url, log)
- traces = hunt_for_stacktrace(logurl)
-
- if traces:
- print_stats(traces, log, verbose=True)
-
- else:
- usage()
-
-if __name__ == '__main__':
- main()
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 99df0d1..dd05438 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -28,12 +28,12 @@
try:
# For Python 3.0 and later
- from urllib.error import HTTPError as HTTPError
+ from urllib.error import HTTPError
import urllib.request as urllib
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as urllib
- from urllib2 import HTTPError as HTTPError
+ from urllib2 import HTTPError
url = 'https://review.openstack.org/projects/'
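The hunk above only drops the redundant "as HTTPError" aliasing; the
two-branch import it tidies up is the usual Python 2/3 compatibility
pattern, roughly::

    try:
        # Python 3
        from urllib.error import HTTPError
        import urllib.request as urllib
    except ImportError:
        # Python 2
        import urllib2 as urllib
        from urllib2 import HTTPError

    # Both names resolve regardless of the interpreter version.
    print(HTTPError, urllib)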
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 44bf840..8b4f913 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -120,3 +120,8 @@
failed_plugin+=", $project"
fi
done
+
+# Check for failed status
+if [[ -n $failed_plugin ]]; then
+ exit 1
+fi
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
deleted file mode 100755
index 43468e4..0000000
--- a/tools/tox_install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Client constraint file contains this client version pin that is in conflict
-# with installing the client from source. We should remove the version pin in
-# the constraints file before applying it for from-source installation.
-
-CONSTRAINTS_FILE=$1
-shift 1
-
-set -e
-
-# NOTE(tonyb): Place this in the tox enviroment's log dir so it will get
-# published to logs.openstack.org for easy debugging.
-localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
-
-if [[ $CONSTRAINTS_FILE != http* ]]; then
- CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE
-fi
-# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
-curl $CONSTRAINTS_FILE --insecure --progress-bar --output $localfile
-
-pip install -c$localfile openstack-requirements
-
-# This is the main purpose of the script: Allow local installation of
-# the current repo. It is listed in constraints file and thus any
-# install will be constrained and we need to unconstrain it.
-edit-constraints $localfile -- $CLIENT_NAME
-
-pip install -c$localfile -U $*
-exit $?
diff --git a/tox.ini b/tox.ini
index 21696eb..892b6f4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,9 +8,8 @@
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
- BRANCH_NAME=master
- CLIENT_NAME=tempest
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
[testenv]
@@ -18,14 +17,12 @@
VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE=1
PYTHONWARNINGS=default::DeprecationWarning
- BRANCH_NAME=master
- CLIENT_NAME=tempest
passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
-install_command =
- {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
whitelist_externals = *
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
@@ -36,7 +33,17 @@
commands = oslo-config-generator --config-file tempest/cmd/config-generator.tempest.conf
[testenv:cover]
-commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
+setenv =
+ {[testenv]setenv}
+ PYTHON=coverage run --source tempest --parallel-mode
+commands =
+ coverage erase
+ find . -type f -name "*.pyc" -delete
+ stestr --test-path ./tempest/tests run {posargs}
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
+ coverage report
[testenv:all]
envdir = .tox/tempest
@@ -50,17 +57,6 @@
find . -type f -name "*.pyc" -delete
tempest run --regex {posargs}
-[testenv:ostestr]
-sitepackages = {[tempestenv]sitepackages}
-# 'all' includes slow tests
-setenv =
- {[tempestenv]setenv}
- OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
-deps = {[tempestenv]deps}
-commands =
- find . -type f -name "*.pyc" -delete
- ostestr {posargs}
-
[testenv:all-plugin]
sitepackages = True
# 'all' includes slow tests
@@ -127,6 +123,10 @@
tempest run --serial --regex '\[.*\bsmoke\b.*\]' {posargs}
[testenv:venv]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands = {posargs}
[testenv:venv-tempest]
@@ -137,9 +137,14 @@
commands = {posargs}
[testenv:docs]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands =
- rm -rf doc/build
- python setup.py build_sphinx {posargs}
+ rm -rf doc/build
+ sphinx-build -b html doc/source doc/build/html
+whitelist_externals = rm
[testenv:pep8]
commands =
@@ -165,9 +170,15 @@
import-order-style = pep8
[testenv:releasenotes]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands =
- rm -rf releasenotes/build
- sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+ rm -rf releasenotes/build
+ sphinx-build -a -E -W -d releasenotes/build/doctrees \
+ -b html releasenotes/source releasenotes/build/html
+whitelist_externals = rm
[testenv:pip-check-reqs]
# Do not install test-requirements as that will pollute the virtualenv for