Merge "Use addClassResourceCleanup for cleanup of volume resources"
diff --git a/.zuul.yaml b/.zuul.yaml
index ec6c59a..2c066ae 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -14,6 +14,40 @@
post-run: playbooks/post-tempest.yaml
- job:
+ name: tempest-full
+ parent: devstack-tempest
+ # This currently only works on the master branch.
+ # NOTE(andreaf) Only run on master for now.
+ # The negative lookahead is redundant but it's a
+ # reminder that we don't want the job running there.
+ branches: ^(?!driverfixes/)master$
+ description: |
+ Base integration test with Neutron networking and py27.
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-full
+ * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: True
+
+- job:
+ name: tempest-full-py3
+ parent: tempest-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: True
+ FORCE_CONFIG_DRIVE: True
+ ENABLE_FILE_INJECTION: False
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
name: tempest-tox-plugin-sanity-check
parent: tox
description: |
@@ -69,7 +103,7 @@
- openstack/neutron-vpnaas
- openstack/nova-lxd
- openstack/novajoin-tempest-plugin
- - openstack/octavia
+ - openstack/octavia-tempest-plugin
- openstack/oswin-tempest-plugin
- openstack/panko
- openstack/patrole
@@ -98,4 +132,15 @@
- ^playbooks/
- ^roles/
- ^.zuul.yaml$
+ - tempest-full-py3:
+ voting: false
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
- tempest-tox-plugin-sanity-check
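
A quick aside on the branch filter used by the ``tempest-full`` job above: the
``(?!driverfixes/)`` negative lookahead is indeed redundant once the pattern is
anchored with ``^`` and ``$``, as a small Python check illustrates (the branch
names below are invented for the example)::

    import re

    branch_filter = re.compile(r'^(?!driverfixes/)master$')

    # Only the literal "master" branch matches; everything else is rejected,
    # with or without the lookahead.
    assert branch_filter.match('master')
    assert branch_filter.match('driverfixes/master') is None
    assert branch_filter.match('stable/queens') is None
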
diff --git a/HACKING.rst b/HACKING.rst
index 57f0409..f961884 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -121,38 +121,38 @@
Test fixtures and resources
---------------------------
Test level resources should be cleaned-up after the test execution. Clean-up
-is best scheduled using `addCleanup` which ensures that the resource cleanup
+is best scheduled using ``addCleanup`` which ensures that the resource cleanup
code is always invoked, and in reverse order with respect to the creation
order.
-Test class level resources should be defined in the `resource_setup` method of
-the test class, except for any credential obtained from the credentials
-provider, which should be set-up in the `setup_credentials` method.
-Cleanup is best scheduled using `addClassResourceCleanup` which ensures that
+Test class level resources should be defined in the ``resource_setup`` method
+of the test class, except for any credential obtained from the credentials
+provider, which should be set-up in the ``setup_credentials`` method.
+Cleanup is best scheduled using ``addClassResourceCleanup`` which ensures that
the cleanup code is always invoked, and in reverse order with respect to the
creation order.
In both cases - test level and class level cleanups - a wait loop should be
scheduled before the actual delete of resources with an asynchronous delete.
-The test base class `BaseTestCase` defines Tempest framework for class level
-fixtures. `setUpClass` and `tearDownClass` are defined here and cannot be
+The test base class ``BaseTestCase`` defines the Tempest framework for class
+level fixtures. ``setUpClass`` and ``tearDownClass`` are defined here and cannot be
overwritten by subclasses (enforced via hacking rule T105).
Set-up is split in a series of steps (setup stages), which can be overwritten
by test classes. Set-up stages are:
-- `skip_checks`
-- `setup_credentials`
-- `setup_clients`
-- `resource_setup`
+- ``skip_checks``
+- ``setup_credentials``
+- ``setup_clients``
+- ``resource_setup``
Tear-down is also split in a series of steps (teardown stages), which are
stacked for execution only if the corresponding setup stage had been
reached during the setup phase. Tear-down stages are:
-- `clear_credentials` (defined in the base test class)
-- `resource_cleanup`
+- ``clear_credentials`` (defined in the base test class)
+- ``resource_cleanup``
Skipping Tests
--------------
@@ -385,7 +385,7 @@
Otherwise the bug fix won't be able to land in the project.
-Handily, `Zuul’s cross-repository dependencies
+Handily, `Zuul's cross-repository dependencies
<https://docs.openstack.org/infra/zuul/user/gating.html#cross-project-dependencies>`_.
can be leveraged to do without step 2 and to have steps 3 and 4 happen
"atomically". To do that, make the patch written in step 1 to depend (refer to
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..555b7d2
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,6 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+openstackdocstheme>=1.17.0 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
+sphinx!=1.6.6,>=1.6.2 # BSD
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index e5f70d2..d0d7320 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -400,7 +400,7 @@
Examples:
* Good - ``http://example.com:1234/v2.0``
- * Wouldn’t work - ``http://example.com:1234/xyz/v2.0/``
+ * Wouldn't work - ``http://example.com:1234/xyz/v2.0/``
(adding prefix/suffix around version etc)
Service Feature Configuration
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
index d96c97a..d25f85c 100644
--- a/doc/source/library/credential_providers.rst
+++ b/doc/source/library/credential_providers.rst
@@ -49,7 +49,7 @@
public_network_id=CONF.network.public_network_id,
create_networks=(CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network),
- resource_prefix=CONF.resources_prefix,
+ resource_prefix='tempest',
credentials_domain=CONF.auth.default_credentials_domain_name,
admin_role=CONF.identity.admin_role,
identity_uri=CONF.identity.uri_v3,
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 7189312..9c4ac0b 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -302,6 +302,10 @@
.. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
+ * `2.6`_
+
+ .. _2.6: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id5
+
* `2.10`_
.. _2.10: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id9
@@ -310,6 +314,10 @@
.. _2.20: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id18
+ * `2.21`_
+
+ .. _2.21: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id19
+
* `2.25`_
.. _2.25: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-mitaka
@@ -352,6 +360,10 @@
.. _3.12: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id12
+ * `3.13`_
+
+ .. _3.13: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id13
+
* `3.14`_
.. _3.14: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id14
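
The identifiers listed above are the values tests request through Tempest's
``min_microversion`` / ``max_microversion`` class attributes. A minimal,
hypothetical compute test pinned to one of the newly documented versions::

    from tempest.api.compute import base


    class ExampleMicroversionTest(base.BaseV2ComputeTest):
        """Runs only when the configured compute microversion range
        includes 2.6 or later."""
        min_microversion = '2.6'
        max_microversion = 'latest'
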
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 2afb1e5..6f6621d 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -132,7 +132,7 @@
Plugin Structure
================
-While there are no hard and fast rules for the structure a plugin, there are
+While there are no hard and fast rules for the structure of a plugin, there are
basically no constraints on what the plugin looks like as long as the 2 steps
above are done. However, there are some recommended patterns to follow to make
it easy for people to contribute and work with your plugin. For example, if you
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index b57e98f..ddae6e2 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -62,9 +62,9 @@
The Old Way using subunit2sql directly
""""""""""""""""""""""""""""""""""""""
-SELECT * from tests where test_id like "%test_id%";
-(where $test_id is the full test_id, but truncated to the class because of
-setUpClass or tearDownClass failures)
+``SELECT * from tests where test_id like "%test_id%";``
+(where ``$test_id`` is the full test_id, but truncated to the class because of
+``setUpClass`` or ``tearDownClass`` failures)
You can access the infra mysql subunit2sql db w/ read-only permissions with:
@@ -74,15 +74,20 @@
* db_name: subunit2sql
For example if you were trying to remove the test with the id:
-tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON.test_get_flavor_details_for_deleted_flavor
+``tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON.test_get_flavor_details_for_deleted_flavor``
you would run the following:
-#. run: "mysql -u query -p -h logstash.openstack.org subunit2sql" to connect
- to the subunit2sql db
-#. run the query: MySQL [subunit2sql]> select * from tests where test_id like
- "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
+#. run the command: ``mysql -u query -p -h logstash.openstack.org subunit2sql``
+ to connect to the subunit2sql db
+#. run the query:
+
+ .. code-block:: console
+
+ MySQL [subunit2sql]> select * from tests where test_id like \
+ "tempest.api.compute.admin.test_flavors_negative.FlavorsAdminNegativeTestJSON%";
+
which will return a table of all the tests in the class (but it will also
- catch failures in setUpClass and tearDownClass)
+ catch failures in ``setUpClass`` and ``tearDownClass``)
#. paste the output table with numbers and the mysql command you ran to
generate it into the etherpad.
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 49af95a..fff2405 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -61,7 +61,7 @@
which is executed in that order. Cleanup of resources provisioned during
the resource_setup must be scheduled right after provisioning using
-the addClassResourceCleanp helper. The resource cleanups stacked this way
+the addClassResourceCleanup helper. The resource cleanups stacked this way
are executed in reverse order during tearDownClass, before the cleanup of
test credentials takes place. An example of a TestCase which defines all
of these would be::
diff --git a/playbooks/post-tempest.yaml b/playbooks/post-tempest.yaml
index 820e4f6..70dac09 100644
--- a/playbooks/post-tempest.yaml
+++ b/playbooks/post-tempest.yaml
@@ -2,7 +2,6 @@
become: true
vars:
logs_root: "{{ devstack_base_dir|default('/opt/stack') }}"
- stage_dir: "{{ devstack_base_dir|default('/opt/stack') }}"
test_results_stage_name: 'test_results'
roles:
- role: process-test-results
diff --git a/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml b/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml
new file mode 100644
index 0000000..404319d
--- /dev/null
+++ b/releasenotes/notes/add-group-type-specs-apis-to-v3-group-types-client-10390b52dedede54.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Add group type specs APIs to v3 group_types_client library.
+
+ * create_or_update_group_type_specs
+ * list_group_type_specs
+ * show_group_type_specs_item
+ * update_group_type_specs_item
+ * delete_group_type_specs_item
diff --git a/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml b/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml
new file mode 100644
index 0000000..6efe7e6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-quotas-api-to-network-quotas-client-3a7c1159af9e56ff.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add show default quotas API to network quotas_client library.
+ This makes it possible to show the default network quotas for
+ a specified project.
diff --git a/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
new file mode 100644
index 0000000..14458d6
--- /dev/null
+++ b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add update group types API to v3 ``group_types_client`` library;
+ min_microversion of this API is 3.11.
diff --git a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
new file mode 100644
index 0000000..e3443c8
--- /dev/null
+++ b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - |
+ The CLIClient class, when it calls a command line client, uses
+ --os-project-name instead of --os-tenant-name for the project, and
+ passes --os-identity-api-version (default empty).
+ All CLI clients wrapped by the cmd_with_auth() method that are still
+ available in supported releases of OpenStack support those switches.
diff --git a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
index 775a383..a002fb8 100644
--- a/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
+++ b/releasenotes/notes/fix-list-group-snapshots-api-969d9321002c566c.yaml
@@ -1,6 +1,6 @@
---
fixes:
- |
- Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
+ Fix list_group_snapshots API in v3 group_snapshots_client: Bug#1715786.
The url path for list group snapshots with details API is changed from
``?detail=True`` to ``/detail``.
diff --git a/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
new file mode 100644
index 0000000..e15d387
--- /dev/null
+++ b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ The following config options and feature flags were deprecated for
+ removal. They have now been removed, since all supported stable
+ branches can handle their absence.
+
+ * ``[identity-feature-enabled].forbid_global_implied_dsr``
+ * ``[image-feature-enabled].deactivate_image``
+ * ``[default].resources_prefix``
+ * config group ``orchestration``
+ * ``[service_available].heat``
diff --git a/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
new file mode 100644
index 0000000..621731d
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-skip_unless_attr-decorator-02bde59a00328f5c.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Remove the deprecated decorator ``skip_unless_attr`` in lib/decorators.py.
diff --git a/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
new file mode 100644
index 0000000..17866e5
--- /dev/null
+++ b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The tox ostestr job (normally invoked with ``tox -eostestr``) has been
+ removed. This was lightly used, and in the near future ostestr will be
+ removed from the tempest requirements file. If you were relying on this
+ functionality you can replicate it by using the venv-tempest tox job. For
+ example, simply running ``tox -evenv-tempest -- ostestr`` will do the same
+ thing the old ostestr job did.
diff --git a/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
new file mode 100644
index 0000000..ddd1704
--- /dev/null
+++ b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ A new boolean configuration option
+ ``[compute-feature-enabled]/volume_backed_live_migration`` has been added.
+ If enabled, tests which validate the behavior of Nova's *volume-backed live
+ migration* feature will be executed. The option defaults to ``False``.
diff --git a/requirements.txt b/requirements.txt
index 023148b..c02cd05 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,11 +8,11 @@
paramiko>=2.0.0 # LGPLv2.1+
netaddr>=0.7.18 # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
-oslo.concurrency>=3.20.0 # Apache-2.0
-oslo.config>=4.6.0 # Apache-2.0
-oslo.log>=3.30.0 # Apache-2.0
+oslo.concurrency>=3.25.0 # Apache-2.0
+oslo.config>=5.1.0 # Apache-2.0
+oslo.log>=3.36.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.utils>=3.31.0 # Apache-2.0
+oslo.utils>=3.33.0 # Apache-2.0
six>=1.10.0 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
PyYAML>=3.10 # MIT
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 001586e..33dcce9 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -16,10 +16,20 @@
:default: ''
A regular expression used to select the tests.
+
It works only when used with some specific tox environments
('all', 'all-plugin'.)
-.. zuul:rolevar:: tox_venvlist
+ Multi-line and commented regexes can be achieved by doing this:
+
+ ::
+ vars:
+ tempest_test_regex: |
+ (?x) # Ignore comments and whitespaces
+ # Line with only a comment.
+ (tempest\.(api|scenario|thirdparty)).*$ # Run only api scenario and third party
+
+.. zuul:rolevar:: tox_envlist
:default: smoke
The Tempest tox environment to run.
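
For reference, the ``(?x)`` flag in the example above is Python's verbose-regex
mode, which ignores whitespace and ``#`` comments inside the pattern. A rough
sketch of the effect (the test ids are invented for the example)::

    import re

    pattern = ("(?x) # Ignore comments and whitespaces\n"
               "# Line with only a comment.\n"
               r"(tempest\.(api|scenario|thirdparty)).*$")

    # Only api, scenario and thirdparty test ids match.
    assert re.match(pattern, 'tempest.api.compute.some_test')
    assert re.match(pattern, 'tempest.tests.unit.some_test') is None
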
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 3e57511..85e94f2 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -1,3 +1,3 @@
devstack_base_dir: /opt/stack
tempest_test_regex: ''
-tox_venvlist: smoke
+tox_envlist: smoke
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 297cd72..87898db 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -21,7 +21,7 @@
when: num_cores|int > 3
- name: Run Tempest
- command: tox -e {{tox_venvlist}} -- {{tempest_test_regex|quote}} --concurrency={{tempest_concurrency|default(default_concurrency)}}
+ command: tox -e {{tox_envlist}} -- {{tempest_test_regex|quote}} --concurrency={{tempest_concurrency|default(default_concurrency)}}
args:
chdir: "{{devstack_base_dir}}/tempest"
become: true
diff --git a/tempest/README.rst b/tempest/README.rst
index 663653e..62821de 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -12,10 +12,12 @@
and guidelines. Below is the overview of the Tempest respository structure
to make this clear.
-| tempest/
-| api/ - API tests
-| scenario/ - complex scenario tests
-| tests/ - unit tests for Tempest internals
+ .. code-block:: console
+
+ tempest/
+ api/ - API tests
+ scenario/ - complex scenario tests
+ tests/ - unit tests for Tempest internals
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index a9772c4..c4d5768 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -84,8 +84,7 @@
nets = cls.networks_client.list_networks(
**search_opts).get('networks', [])
if nets:
- raise lib_excs.TempestException(
- 'Found shared networks: %s' % nets)
+ raise cls.skipException('Found shared networks: %s' % nets)
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 08b2d19..711b441 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -56,6 +56,18 @@
# Create a flavor with ephemeral disk
flavor = self.create_flavor(name=flavor_name, ram=ram, vcpus=vcpus,
disk=disk, ephemeral=ephem_disk)
+
+ # Copy the extra specs of self.flavor_ref to the created flavor,
+ # because the environment may need special extra specs to create
+ # servers, and those specs are carried by self.flavor_ref.
+ extra_spec_keys = \
+ self.admin_flavors_client.list_flavor_extra_specs(
+ self.flavor_ref)['extra_specs']
+ if extra_spec_keys:
+ self.admin_flavors_client.set_flavor_extra_spec(
+ flavor['id'], **extra_spec_keys)
+
return flavor['id']
flavor_with_eph_disk_id = create_flavor_with_ephemeral(ephem_disk=1)
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index e24c7c1..24ea8a1 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -34,7 +34,8 @@
k_name = data_utils.rand_name('keypair')
keypair = self.create_keypair(k_name,
keypair_type='ssh',
- user_id=user_id)
+ user_id=user_id,
+ client=self.client)
self.assertEqual(k_name, keypair['name'],
"The created keypair name is not equal "
"to the requested name!")
@@ -56,7 +57,8 @@
self.assertEqual(user_id, keypair_detail['user_id'],
"The fetched keypair is not for requested user!")
# Create a admin keypair
- admin_keypair = self.create_keypair(keypair_type='ssh')
+ admin_keypair = self.create_keypair(keypair_type='ssh',
+ client=self.client)
admin_keypair.pop('private_key', None)
admin_keypair.pop('user_id')
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 411159b..9e897e3 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -132,7 +132,9 @@
def test_live_block_migration_paused(self):
self._test_live_migration(state='PAUSED')
- @decorators.skip_because(bug="1524898")
+ @testtools.skipUnless(CONF.compute_feature_enabled.
+ volume_backed_live_migration,
+ 'Volume-backed live migration not available')
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@utils.services('volume')
def test_volume_backed_live_migration(self):
@@ -228,8 +230,8 @@
while data not in console_output and t <= 120.0:
try:
ws.send_frame(data)
- recieved = ws.receive_frame()
- console_output += recieved
+ received = ws.receive_frame()
+ console_output += received
except Exception:
# In case we had an issue with send/receive on the
# websocket connection, we create a new one.
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index a626ebb..a6b71b2 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -77,6 +77,16 @@
)['flavor']
self.addCleanup(self._flavor_clean_up, flavor['id'])
+ # Copy the extra specs of self.flavor_ref to the created flavor,
+ # because the environment may need special extra specs to create
+ # servers, and those specs are carried by self.flavor_ref.
+ extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
+ self.flavor_ref)['extra_specs']
+ if extra_spec_keys:
+ self.admin_flavors_client.set_flavor_extra_spec(
+ flavor['id'], **extra_spec_keys)
+
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index acb0d90..87ce39d 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -24,7 +24,7 @@
"""Tests Nova Networks API that usually requires admin privileges.
API docs:
- http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
+ https://developer.openstack.org/api-ref/compute/#networks-os-networks-deprecated
"""
@classmethod
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 9ee8858..9759be7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -99,6 +99,15 @@
cls.versions_client = cls.os_primary.compute_versions_client
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
+ if CONF.service_available.glance:
+ if CONF.image_feature_enabled.api_v1:
+ cls.images_client = cls.os_primary.image_client
+ elif CONF.image_feature_enabled.api_v2:
+ cls.images_client = cls.os_primary.image_client_v2
+ else:
+ raise lib_exc.InvalidConfiguration(
+ 'Either api_v1 or api_v2 must be True in '
+ '[image-feature-enabled].')
@classmethod
def resource_setup(cls):
@@ -176,11 +185,12 @@
cls.request_microversion)
v2_37_version = api_version_request.APIVersionRequest('2.37')
+ tenant_network = cls.get_tenant_network()
# NOTE(snikitin): since microversion v2.37 'networks' field is required
- if request_version >= v2_37_version and 'networks' not in kwargs:
+ if (request_version >= v2_37_version and 'networks' not in kwargs and
+ not tenant_network):
kwargs['networks'] = 'none'
- tenant_network = cls.get_tenant_network()
body, servers = compute.create_test_server(
cls.os_primary,
validatable,
@@ -254,7 +264,11 @@
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
- """Wrapper utility that returns an image created from the server."""
+ """Wrapper utility that returns an image created from the server.
+
+ If compute microversion >= 2.36, the returned image response will
+ be from the image service API rather than the compute image proxy API.
+ """
name = kwargs.pop('name',
data_utils.rand_name(cls.__name__ + "-image"))
wait_until = kwargs.pop('wait_until', None)
@@ -267,14 +281,21 @@
image_id = image['image_id']
else:
image_id = data_utils.parse_image_id(image.response['location'])
+
+ # The compute image proxy APIs were deprecated in 2.35 so
+ # use the images client directly if the API microversion being
+ # used is >=2.36.
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.36", image.response, "lt"):
+ client = cls.images_client
+ else:
+ client = cls.compute_images_client
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
- cls.compute_images_client.delete_image,
- image_id)
+ client.delete_image, image_id)
if wait_until is not None:
try:
- waiters.wait_for_image_status(cls.compute_images_client,
- image_id, wait_until)
+ waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
# If the image is not found after create_image returned
@@ -292,7 +313,11 @@
image_id=image_id)
else:
raise
- image = cls.compute_images_client.show_image(image_id)['image']
+ image = client.show_image(image_id)
+ # The compute image client wraps the response in an 'image' element,
+ # which the Glance image client does not.
+ if 'image' in image:
+ image = image['image']
if wait_until.upper() == 'ACTIVE':
if wait_for_server:
@@ -352,6 +377,13 @@
'VERIFY_RESIZE')
cls.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE')
+ server = cls.servers_client.show_server(server_id)['server']
+ # Nova API > 2.46 no longer includes flavor.id
+ if server['flavor'].get('id'):
+ if new_flavor_id != server['flavor']['id']:
+ msg = ('Flavor id of %s is not equal to new_flavor_id.'
+ % server_id)
+ raise lib_exc.TempestException(msg)
@classmethod
def delete_volume(cls, volume_id):
@@ -439,7 +471,7 @@
# is already detached.
pass
- def attach_volume(self, server, volume, device=None, check_reserved=False):
+ def attach_volume(self, server, volume, device=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -448,15 +480,10 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
- :param check_reserved: Consider a status of reserved as valid for
- completion. This is to handle new Cinder attach where we more
- accurately use 'reserved' for things like attaching to a shelved
- server.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
-
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
@@ -467,11 +494,8 @@
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
- statuses = ['in-use']
- if check_reserved:
- statuses.append('reserved')
waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], statuses)
+ volume['id'], 'in-use')
return attachment
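
For context on the ``create_test_server`` change above: compute microversion
2.37 made the ``networks`` field mandatory, so the helper now injects
``networks='none'`` only when the requested microversion is at least 2.37 and
no tenant network is available. A minimal sketch of that check (the version
strings are chosen for illustration)::

    from tempest.lib.common import api_version_request

    request_version = api_version_request.APIVersionRequest('2.42')
    v2_37_version = api_version_request.APIVersionRequest('2.37')

    tenant_network = None   # pretend no usable tenant network was found
    kwargs = {}
    # Mirrors the condition used by create_test_server.
    if (request_version >= v2_37_version and 'networks' not in kwargs and
            not tenant_network):
        kwargs['networks'] = 'none'

    assert kwargs == {'networks': 'none'}
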
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index efd4f0e..3a474e6 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -30,18 +30,6 @@
class FlavorsV2NegativeTest(base.BaseV2ComputeTest):
- @classmethod
- def setup_clients(cls):
- super(FlavorsV2NegativeTest, cls).setup_clients()
- if CONF.image_feature_enabled.api_v1:
- cls.images_client = cls.os_primary.image_client
- elif CONF.image_feature_enabled.api_v2:
- cls.images_client = cls.os_primary.image_client_v2
- else:
- raise lib_exc.InvalidConfiguration(
- 'Either api_v1 or api_v2 must be True in '
- '[image-feature-enabled].')
-
@decorators.attr(type=['negative'])
@utils.services('image')
@decorators.idempotent_id('90f0d93a-91c1-450c-91e6-07d18172cefe')
diff --git a/tempest/api/compute/keypairs/base.py b/tempest/api/compute/keypairs/base.py
index 0051810..44da88c 100644
--- a/tempest/api/compute/keypairs/base.py
+++ b/tempest/api/compute/keypairs/base.py
@@ -20,17 +20,16 @@
class BaseKeypairTest(base.BaseV2ComputeTest):
"""Base test case class for all keypair API tests."""
- @classmethod
- def setup_clients(cls):
- super(BaseKeypairTest, cls).setup_clients()
- cls.client = cls.keypairs_client
-
- def _delete_keypair(self, keypair_name, **params):
- self.client.delete_keypair(keypair_name, **params)
+ def _delete_keypair(self, keypair_name, client=None, **params):
+ if not client:
+ client = self.keypairs_client
+ client.delete_keypair(keypair_name, **params)
def create_keypair(self, keypair_name=None,
pub_key=None, keypair_type=None,
- user_id=None):
+ user_id=None, client=None):
+ if not client:
+ client = self.keypairs_client
if keypair_name is None:
keypair_name = data_utils.rand_name(
self.__class__.__name__ + '-keypair')
@@ -43,6 +42,7 @@
if user_id:
kwargs.update({'user_id': user_id})
delete_params['user_id'] = user_id
- body = self.client.create_keypair(**kwargs)['keypair']
- self.addCleanup(self._delete_keypair, keypair_name, **delete_params)
+ body = client.create_keypair(**kwargs)['keypair']
+ self.addCleanup(self._delete_keypair, keypair_name,
+ client, **delete_params)
return body
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 3a54d51..66abb21 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -35,7 +35,7 @@
key_list.append(keypair)
# Fetch all keypairs and verify the list
# has all created keypairs
- fetched_list = self.client.list_keypairs()['keypairs']
+ fetched_list = self.keypairs_client.list_keypairs()['keypairs']
new_list = list()
for keypair in fetched_list:
new_list.append(keypair['keypair'])
@@ -61,7 +61,7 @@
# Keypair should be created, Got details by name and deleted
k_name = data_utils.rand_name('keypair')
self.create_keypair(k_name)
- keypair_detail = self.client.show_keypair(k_name)['keypair']
+ keypair_detail = self.keypairs_client.show_keypair(k_name)['keypair']
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
diff --git a/tempest/api/compute/keypairs/test_keypairs_negative.py b/tempest/api/compute/keypairs/test_keypairs_negative.py
index 205076c..f9050a8 100644
--- a/tempest/api/compute/keypairs/test_keypairs_negative.py
+++ b/tempest/api/compute/keypairs/test_keypairs_negative.py
@@ -34,7 +34,8 @@
def test_keypair_delete_nonexistent_key(self):
# Non-existent key deletion should throw a proper error
k_name = data_utils.rand_name("keypair-non-existent")
- self.assertRaises(lib_exc.NotFound, self.client.delete_keypair,
+ self.assertRaises(lib_exc.NotFound,
+ self.keypairs_client.delete_keypair,
k_name)
@decorators.attr(type=['negative'])
@@ -58,11 +59,11 @@
def test_create_keypair_with_duplicate_name(self):
# Keypairs with duplicate names should not be created
k_name = data_utils.rand_name('keypair')
- self.client.create_keypair(name=k_name)
+ self.keypairs_client.create_keypair(name=k_name)
# Now try the same keyname to create another key
self.assertRaises(lib_exc.Conflict, self.create_keypair,
k_name)
- self.client.delete_keypair(k_name)
+ self.keypairs_client.delete_keypair(k_name)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1398abe1-4a84-45fb-9294-89f514daff00')
diff --git a/tempest/api/compute/keypairs/test_keypairs_v22.py b/tempest/api/compute/keypairs/test_keypairs_v22.py
index f39bb12..1aff262 100644
--- a/tempest/api/compute/keypairs/test_keypairs_v22.py
+++ b/tempest/api/compute/keypairs/test_keypairs_v22.py
@@ -32,9 +32,9 @@
# Verify whether 'type' is present in keypair create response of
# version 2.2 and it is with default value 'ssh'.
self._check_keypair_type(keypair, keypair_type)
- keypair_detail = self.client.show_keypair(k_name)['keypair']
+ keypair_detail = self.keypairs_client.show_keypair(k_name)['keypair']
self._check_keypair_type(keypair_detail, keypair_type)
- fetched_list = self.client.list_keypairs()['keypairs']
+ fetched_list = self.keypairs_client.list_keypairs()['keypairs']
for keypair in fetched_list:
# Verify whether 'type' is present in keypair list response of
# version 2.2 and it is with default value 'ssh'.
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0248c65..0e8f681 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -78,8 +78,11 @@
return port
- def _check_interface(self, iface, port_id=None, network_id=None,
- fixed_ip=None, mac_addr=None):
+ def _check_interface(self, iface, server_id=None, port_id=None,
+ network_id=None, fixed_ip=None, mac_addr=None):
+ if server_id:
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server_id, iface['port_id'], 'ACTIVE')
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
@@ -109,9 +112,8 @@
network_id = ifs[0]['net_id']
iface = self.interfaces_client.create_interface(
server['id'], net_id=network_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, network_id=network_id)
+ self._check_interface(iface, server_id=server['id'],
+ network_id=network_id)
return iface
def _test_create_interface_by_port_id(self, server, ifs):
@@ -121,9 +123,8 @@
self.addCleanup(self.ports_client.delete_port, port_id)
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'], port_id=port_id,
+ network_id=network_id)
return iface
def _test_create_interface_by_fixed_ips(self, server, ifs):
@@ -140,9 +141,8 @@
server['id'], net_id=network_id,
fixed_ips=fixed_ips)['interfaceAttachment']
self.addCleanup(self.ports_client.delete_port, iface['port_id'])
- iface = waiters.wait_for_interface_status(
- self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface, fixed_ip=ip_list[0])
+ self._check_interface(iface, server_id=server['id'],
+ fixed_ip=ip_list[0])
return iface
def _test_show_interface(self, server, ifs):
@@ -271,7 +271,8 @@
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
- self._check_interface(iface, port_id=port_id)
+ self._check_interface(iface, server_id=server['id'],
+ port_id=port_id)
# detach the port from the server; this is a cast in the compute
# API so we have to poll the port until the device_id is unset.
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index a126fd6..d857fcb 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -139,6 +139,7 @@
server = self.create_test_server(
validatable=True,
+ wait_until='ACTIVE',
validation_resources=validation_resources,
config_drive=config_drive_enabled,
adminPass=admin_pass,
@@ -205,6 +206,7 @@
self.addCleanup(self.delete_server, server['id'])
+ server = self.servers_client.show_server(server['id'])['server']
self.ssh_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 6c9b287..393e68f 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -119,8 +119,12 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('74745ad8-b346-45b5-b9b8-509d7447fc1f')
def test_list_servers_by_changes_since_future_date(self):
- # Return an empty list when a date in the future is passed
- changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
+ # Return an empty list when a date in the future is passed.
+ # The updated_at field may not have been set at the point in the boot
+ # process where build_request still exists, so add
+ # {'status': 'ACTIVE'} along with changes-since as a filter.
+ changes_since = {'changes-since': '2051-01-01T12:34:00Z',
+ 'status': 'ACTIVE'}
body = self.client.list_servers(**changes_since)
self.assertEmpty(body['servers'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index e2be249..5c3e9f0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -582,6 +582,12 @@
compute.shelve_server(self.client, self.server_id,
force_shelve_offload=True)
+ def _unshelve_server():
+ server_info = self.client.show_server(self.server_id)['server']
+ if 'SHELVED' in server_info['status']:
+ self.client.unshelve_server(self.server_id)
+ self.addOnException(_unshelve_server)
+
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index e7591a5..e6a668a 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -21,19 +21,14 @@
class ServerPasswordTestJSON(base.BaseV2ComputeTest):
@classmethod
- def setup_clients(cls):
- super(ServerPasswordTestJSON, cls).setup_clients()
- cls.client = cls.servers_client
-
- @classmethod
def resource_setup(cls):
super(ServerPasswordTestJSON, cls).resource_setup()
cls.server = cls.create_test_server(wait_until="ACTIVE")
@decorators.idempotent_id('f83b582f-62a8-4f22-85b0-0dee50ff783a')
def test_get_server_password(self):
- self.client.show_password(self.server['id'])
+ self.servers_client.show_password(self.server['id'])
@decorators.idempotent_id('f8229e8b-b625-4493-800a-bde86ac611ea')
def test_delete_server_password(self):
- self.client.delete_password(self.server['id'])
+ self.servers_client.delete_password(self.server['id'])
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index d067bb3..9b545af 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -477,6 +477,12 @@
# shelve a shelved server.
compute.shelve_server(self.client, self.server_id)
+ def _unshelve_server():
+ server_info = self.client.show_server(self.server_id)['server']
+ if 'SHELVED' in server_info['status']:
+ self.client.unshelve_server(self.server_id)
+ self.addOnException(_unshelve_server)
+
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index 20923a8..c4e2400 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -28,11 +28,6 @@
cls.set_network_resources()
super(VirtualInterfacesNegativeTestJSON, cls).setup_credentials()
- @classmethod
- def setup_clients(cls):
- super(VirtualInterfacesNegativeTestJSON, cls).setup_clients()
- cls.client = cls.servers_client
-
@decorators.attr(type=['negative'])
@decorators.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
@utils.services('network')
@@ -41,5 +36,5 @@
# for an invalid server_id
invalid_server_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
- self.client.list_virtual_interfaces,
+ self.servers_client.list_virtual_interfaces,
invalid_server_id)
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 9bef80f..e6184b7 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -23,12 +23,12 @@
CONF = config.CONF
-class AttachVolumeTestJSON(base.BaseV2ComputeTest):
- max_microversion = '2.19'
+class BaseAttachVolumeTest(base.BaseV2ComputeTest):
+ """Base class for the attach volume tests in this module."""
@classmethod
def skip_checks(cls):
- super(AttachVolumeTestJSON, cls).skip_checks()
+ super(BaseAttachVolumeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@@ -36,11 +36,11 @@
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
- super(AttachVolumeTestJSON, cls).setup_credentials()
+ super(BaseAttachVolumeTest, cls).setup_credentials()
@classmethod
def resource_setup(cls):
- super(AttachVolumeTestJSON, cls).resource_setup()
+ super(BaseAttachVolumeTest, cls).resource_setup()
cls.device = CONF.compute.volume_device_name
def _create_server(self):
@@ -58,6 +58,9 @@
server['id'])['addresses']
return server, validation_resources
+
+class AttachVolumeTestJSON(BaseAttachVolumeTest):
+
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
@@ -149,7 +152,7 @@
self.volumes_client, attachment['volumeId'], 'available')
-class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
+class AttachVolumeShelveTestJSON(BaseAttachVolumeTest):
"""Testing volume with shelved instance.
This test checks the attaching and detaching volumes from
@@ -223,8 +226,7 @@
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device),
- check_reserved=True)
+ device=('/dev/%s' % self.device))
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(
@@ -250,8 +252,7 @@
self._shelve_server(server, validation_resources)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device),
- check_reserved=True)
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device))
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index eabb907..7a74869 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -41,3 +41,18 @@
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('aab919e2-d992-4cbb-a4ed-745c2475398c')
+ def test_attach_attached_volume_to_same_server(self):
+ # Test attaching the same volume to the same instance once
+ # it's already attached. The nova/cinder validation for this differs
+ # depending on whether or not cinder v3.27 is being used to attach
+ # the volume to the instance.
+ server = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ self.attach_volume(server, volume)
+
+ self.assertRaises(lib_exc.BadRequest,
+ self.attach_volume, server, volume)
diff --git a/tempest/api/identity/admin/v2/test_tokens.py b/tempest/api/identity/admin/v2/test_tokens.py
index 6b30d23..6ce1a8b 100644
--- a/tempest/api/identity/admin/v2/test_tokens.py
+++ b/tempest/api/identity/admin/v2/test_tokens.py
@@ -112,6 +112,8 @@
@decorators.idempotent_id('ca3ea6f7-ed08-4a61-adbd-96906456ad31')
def test_list_endpoints_for_token(self):
+ tempest_services = ['keystone', 'nova', 'neutron', 'swift',
+ 'cinder']
# get a token for the user
creds = self.os_primary.credentials
username = creds.username
@@ -125,9 +127,10 @@
self.assertIsInstance(endpoints, list)
# Store list of service names
service_names = [e['name'] for e in endpoints]
- # Get the list of available services.
+ # Get the list of available services. Keystone is always available.
available_services = [s[0] for s in list(
- CONF.service_available.items()) if s[1] is True]
+ CONF.service_available.items()) if s[1] is True] + ['keystone']
# Verify that all available services are present.
- for service in available_services:
- self.assertIn(service, service_names)
+ for service in tempest_services:
+ if service in available_services:
+ self.assertIn(service, service_names)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 15b2008..ba19ff7 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -32,21 +32,18 @@
u_email = '%s@testmail.tm' % u_name
u_password = data_utils.rand_password()
for _ in range(2):
- cls.project = cls.projects_client.create_project(
+ project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'))['project']
- cls.projects.append(cls.project['id'])
+ cls.addClassResourceCleanup(
+ cls.projects_client.delete_project, project['id'])
+ cls.projects.append(project['id'])
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.users_client.delete_user(cls.user_body['id'])
- for p in cls.projects:
- cls.projects_client.delete_project(p)
- super(CredentialsTestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(
+ cls.users_client.delete_user, cls.user_body['id'])
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index f731697..c4e0622 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -37,18 +37,6 @@
super(DomainConfigurationTestJSON, cls).setup_clients()
cls.client = cls.domain_config_client
- @classmethod
- def resource_setup(cls):
- super(DomainConfigurationTestJSON, cls).resource_setup()
- cls.group = cls.groups_client.create_group(
- name=data_utils.rand_name('group'),
- description=data_utils.rand_name('group-desc'))['group']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- super(DomainConfigurationTestJSON, cls).resource_cleanup()
-
def _create_domain_and_config(self, config):
domain = self.setup_test_domain()
config = self.client.create_domain_config(domain['id'], **config)[
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 49dbba1..eef93c2 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -28,11 +29,12 @@
@classmethod
def resource_setup(cls):
super(EndPointGroupsTest, cls).resource_setup()
- cls.service_ids = list()
cls.endpoint_groups = list()
# Create endpoint group so as to use it for LIST test
service_id = cls._create_service()
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
@@ -42,18 +44,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint_group, endpoint_group['id'])
cls.endpoint_groups.append(endpoint_group)
@classmethod
- def resource_cleanup(cls):
- for e in cls.endpoint_groups:
- cls.client.delete_endpoint_group(e['id'])
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointGroupsTest, cls).resource_cleanup()
-
- @classmethod
def _create_service(cls):
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
@@ -64,7 +60,6 @@
description=s_description))
service_id = service_data['service']['id']
- cls.service_ids.append(service_id)
return service_id
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
@@ -78,6 +73,9 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint_group, endpoint_group['id'])
self.endpoint_groups.append(endpoint_group)
@@ -115,7 +113,6 @@
# Deleting the endpoint group created in this method
self.client.delete_endpoint_group(endpoint_group['id'])
- self.endpoint_groups.remove(endpoint_group)
# Checking whether endpoint group is deleted successfully
fetched_endpoints = \
@@ -136,10 +133,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
- self.endpoint_groups.append(endpoint_group)
+ self.addCleanup(self.client.delete_endpoint_group,
+ endpoint_group['id'])
# Creating new attr values to update endpoint group
service2_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service2_id)
name2 = data_utils.rand_name('service_group2')
description2 = data_utils.rand_name('description2')
filters = {'service_id': service2_id}
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 5d48f68..874aaa4 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -34,12 +35,18 @@
interfaces = ['public', 'internal']
cls.setup_endpoint_ids = list()
for i in range(2):
- cls._create_service()
+ service = cls._create_service()
+ cls.service_ids.append(service['id'])
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service['id'])
+
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
url=url, region=region, enabled=True)['endpoint']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@classmethod
@@ -53,17 +60,7 @@
service_data = (
cls.services_client.create_service(name=s_name, type=s_type,
description=s_description))
- service = service_data['service']
- cls.service_ids.append(service['id'])
- return service
-
- @classmethod
- def resource_cleanup(cls):
- for e in cls.setup_endpoint_ids:
- cls.client.delete_endpoint(e)
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointsTestJSON, cls).resource_cleanup()
+ return service_data['service']
@decorators.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
def test_list_endpoints(self):
@@ -114,8 +111,8 @@
interface=interface,
url=url, region=region,
enabled=True)['endpoint']
-
- self.setup_endpoint_ids.append(endpoint['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
@@ -137,7 +134,6 @@
# Deleting the endpoint created in this method
self.client.delete_endpoint(endpoint['id'])
- self.setup_endpoint_ids.remove(endpoint['id'])
# Checking whether endpoint is deleted successfully
fetched_endpoints = self.client.list_endpoints()['endpoints']
@@ -147,8 +143,20 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('37e8f15e-ee7c-4657-a1e7-f6b61e375eff')
def test_update_endpoint(self):
- # Creating an endpoint so as to check update endpoint
- # with new values
+ # NOTE(zhufl) Service2 should be created before endpoint_for_update
+ # is created, because Service2 must be deleted after
+ # endpoint_for_update is deleted, otherwise we will get a 404 error
+ # when deleting endpoint_for_update if endpoint's service is deleted.
+
+ # Creating service for updating endpoint with new service ID
+ s_name = data_utils.rand_name('service')
+ s_type = data_utils.rand_name('type')
+ s_description = data_utils.rand_name('description')
+ service2 = self._create_service(s_name=s_name, s_type=s_type,
+ s_description=s_description)
+ self.addCleanup(self.services_client.delete_service, service2['id'])
+
+ # Creating an endpoint so as to check update endpoint with new values
region1 = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
@@ -158,12 +166,7 @@
url=url1, region=region1,
enabled=True)['endpoint'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
- # Creating service so as update endpoint with new service ID
- s_name = data_utils.rand_name('service')
- s_type = data_utils.rand_name('type')
- s_description = data_utils.rand_name('description')
- service2 = self._create_service(s_name=s_name, s_type=s_type,
- s_description=s_description)
+
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
url2 = data_utils.rand_url()
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 70dd7b5..d54e222 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -30,7 +30,6 @@
@classmethod
def resource_setup(cls):
super(EndpointsNegativeTestJSON, cls).resource_setup()
- cls.service_ids = list()
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
@@ -38,14 +37,10 @@
cls.services_client.create_service(name=s_name, type=s_type,
description=s_description)
['service'])
- cls.service_id = service_data['id']
- cls.service_ids.append(cls.service_id)
+ cls.addClassResourceCleanup(cls.services_client.delete_service,
+ service_data['id'])
- @classmethod
- def resource_cleanup(cls):
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndpointsNegativeTestJSON, cls).resource_cleanup()
+ cls.service_id = service_data['id']
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ac6c137e-4d3d-448f-8c83-4f13d0942651')
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index c0c79b9..68c0225 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -36,20 +36,19 @@
data_utils.rand_name('project-'),
description=data_utils.rand_name('project-desc-'),
domain_id=cls.domain['id'])['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.project['id'])
cls.group = cls.groups_client.create_group(
name=data_utils.rand_name('group-'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
+ cls.addClassResourceCleanup(cls.groups_client.delete_group,
+ cls.group['id'])
cls.user = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- cls.users_client.delete_user(cls.user['id'])
- cls.projects_client.delete_project(cls.project['id'])
- super(InheritsV3TestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 25dd52b..82664e8 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -27,32 +27,27 @@
# Create a domain
cls.domain = cls.create_domain()
# Create project with domain
- cls.projects = list()
cls.p1_name = data_utils.rand_name('project')
cls.p1 = cls.projects_client.create_project(
cls.p1_name, enabled=False,
domain_id=cls.domain['id'])['project']
- cls.projects.append(cls.p1)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p1['id'])
cls.project_ids.append(cls.p1['id'])
# Create default project
p2_name = data_utils.rand_name('project')
cls.p2 = cls.projects_client.create_project(p2_name)['project']
- cls.projects.append(cls.p2)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p2['id'])
cls.project_ids.append(cls.p2['id'])
# Create a new project (p3) using p2 as parent project
p3_name = data_utils.rand_name('project')
cls.p3 = cls.projects_client.create_project(
p3_name, parent_id=cls.p2['id'])['project']
- cls.projects.append(cls.p3)
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p3['id'])
cls.project_ids.append(cls.p3['id'])
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the projects created during setup in inverse order
- for project in reversed(cls.projects):
- cls.projects_client.delete_project(project['id'])
- super(ListProjectsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
def test_list_projects(self):
# List projects
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 88cd8be..c69e4c8 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -47,21 +47,18 @@
cls.domain_enabled_user = cls.users_client.create_user(
name=u1_name, password=alt_password,
email=cls.alt_email, domain_id=cls.domain['id'])['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.domain_enabled_user['id'])
cls.users.append(cls.domain_enabled_user)
# Create default not enabled user
u2_name = data_utils.rand_name('test_user')
cls.non_domain_enabled_user = cls.users_client.create_user(
name=u2_name, password=alt_password,
email=cls.alt_email, enabled=False)['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.non_domain_enabled_user['id'])
cls.users.append(cls.non_domain_enabled_user)
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the users created during setup
- for user in cls.users:
- cls.users_client.delete_user(user['id'])
- super(UsersV3TestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
# List users with domain
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index d00e408..f22a528 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -34,14 +34,10 @@
r_description = data_utils.rand_name('description')
region = cls.client.create_region(
description=r_description)['region']
+ cls.addClassResourceCleanup(
+ cls.client.delete_region, region['id'])
cls.setup_regions.append(region)
- @classmethod
- def resource_cleanup(cls):
- for r in cls.setup_regions:
- cls.client.delete_region(r['id'])
- super(RegionsTestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('56186092-82e4-43f2-b954-91013218ba42')
def test_create_update_get_delete_region(self):
# Create region
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index e7b005c..69cac33 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -32,6 +32,8 @@
for _ in range(3):
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
+ cls.addClassResourceCleanup(cls.roles_client.delete_role,
+ role['id'])
cls.roles.append(role)
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
@@ -42,25 +44,23 @@
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'),
domain_id=cls.domain['id'])['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.project['id'])
cls.group_body = cls.groups_client.create_group(
name=data_utils.rand_name('Group'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
+ cls.addClassResourceCleanup(cls.groups_client.delete_group,
+ cls.group_body['id'])
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=cls.u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user_body['id'])
cls.role = cls.roles_client.create_role(
name=data_utils.rand_name('Role'))['role']
-
- @classmethod
- def resource_cleanup(cls):
- cls.roles_client.delete_role(cls.role['id'])
- cls.groups_client.delete_group(cls.group_body['id'])
- cls.users_client.delete_user(cls.user_body['id'])
- cls.projects_client.delete_project(cls.project['id'])
- for role in cls.roles:
- cls.roles_client.delete_role(role['id'])
- super(RolesV3TestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.roles_client.delete_role,
+ cls.role['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
@@ -338,14 +338,13 @@
# domain role to a global one
self._create_implied_role(domain_role1['id'], self.role['id'])
- if CONF.identity_feature_enabled.forbid_global_implied_dsr:
- # The contrary is not true: we can't create an inference rule
- # from a global role to a domain role
- self.assertRaises(
- lib_exc.Forbidden,
- self.roles_client.create_role_inference_rule,
- self.role['id'],
- domain_role1['id'])
+ # The contrary is not true: we can't create an inference rule
+ # from a global role to a domain role
+ self.assertRaises(
+ lib_exc.Forbidden,
+ self.roles_client.create_role_inference_rule,
+ self.role['id'],
+ domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
def test_assignments_for_domain_roles(self):
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index c846f88..ce5bd3e 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -18,8 +18,6 @@
import six
-import testtools
-
from oslo_log import log as logging
from tempest.api.image import base
from tempest import config
@@ -128,8 +126,6 @@
self.assertEqual(image['id'], body['id'])
self.assertEqual(new_image_name, body['name'])
- @testtools.skipUnless(CONF.image_feature_enabled.deactivate_image,
- 'deactivate-image is not available.')
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
# Create image
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index 4d41e33..49a9cdb 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -130,5 +130,3 @@
subnet_list = self.admin_subnets_client.list_subnets()
self.assertNotIn(subnet['id'],
(s['id'] for s in subnet_list))
- # Removes subnet from the cleanup list
- self.subnets.remove(subnet)
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index 1a7b0ec..206d867 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -51,7 +51,8 @@
agents = cls.admin_agents_client.list_agents(
agent_type=AGENT_TYPE)['agents']
for agent in agents:
- if agent['configurations']['agent_mode'] in AGENT_MODES:
+ if (agent['configurations']['agent_mode'] in AGENT_MODES and
+ agent['alive']):
cls.agent = agent
break
else:
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index cf4236d..57a28bf 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -80,6 +80,10 @@
non_default_quotas = self.admin_quotas_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(project_id, q['tenant_id'])
+ quota_set = self.admin_quotas_client.show_quotas(project_id)['quota']
+ default_quotas = self.admin_quotas_client.show_default_quotas(
+ project_id)['quota']
+ self.assertEqual(default_quotas, quota_set)
@decorators.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
def test_quotas(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index c2a67e3..8670165 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -88,11 +88,9 @@
@classmethod
def resource_setup(cls):
super(BaseNetworkTest, cls).resource_setup()
- cls.networks = []
cls.subnets = []
cls.ports = []
cls.routers = []
- cls.floating_ips = []
cls.ethertype = "IPv" + str(cls._ip_version)
if cls._ip_version == 4:
cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
@@ -102,32 +100,6 @@
cls.mask_bits = CONF.network.project_network_v6_mask_bits
@classmethod
- def resource_cleanup(cls):
- if CONF.service_available.neutron:
- # Clean up floating IPs
- for floating_ip in cls.floating_ips:
- test_utils.call_and_ignore_notfound_exc(
- cls.floating_ips_client.delete_floatingip,
- floating_ip['id'])
- # Clean up ports
- for port in cls.ports:
- test_utils.call_and_ignore_notfound_exc(
- cls.ports_client.delete_port, port['id'])
- # Clean up routers
- for router in cls.routers:
- test_utils.call_and_ignore_notfound_exc(
- cls.delete_router, router)
- # Clean up subnets
- for subnet in cls.subnets:
- test_utils.call_and_ignore_notfound_exc(
- cls.subnets_client.delete_subnet, subnet['id'])
- # Clean up networks
- for network in cls.networks:
- test_utils.call_and_ignore_notfound_exc(
- cls.networks_client.delete_network, network['id'])
- super(BaseNetworkTest, cls).resource_cleanup()
-
- @classmethod
def create_network(cls, network_name=None, **kwargs):
"""Wrapper utility that returns a test network."""
network_name = network_name or data_utils.rand_name(
@@ -135,7 +107,9 @@
body = cls.networks_client.create_network(name=network_name, **kwargs)
network = body['network']
- cls.networks.append(network)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.networks_client.delete_network,
+ network['id'])
return network
@classmethod
@@ -178,6 +152,9 @@
message = 'Available CIDR for subnet creation could not be found'
raise exceptions.BuildErrorException(message)
subnet = body['subnet']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.subnets_client.delete_subnet,
+ subnet['id'])
cls.subnets.append(subnet)
return subnet
@@ -187,6 +164,8 @@
body = cls.ports_client.create_port(network_id=network['id'],
**kwargs)
port = body['port']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.ports_client.delete_port, port['id'])
cls.ports.append(port)
return port
@@ -213,6 +192,8 @@
name=router_name, external_gateway_info=ext_gw_info,
admin_state_up=admin_state_up, **kwargs)
router = body['router']
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.delete_router, router)
cls.routers.append(router)
return router
@@ -222,7 +203,9 @@
body = cls.floating_ips_client.create_floatingip(
floating_network_id=external_network_id)
fip = body['floatingip']
- cls.floating_ips.append(fip)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.floating_ips_client.delete_floatingip,
+ fip['id'])
return fip
@classmethod
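Because every create_* helper above now registers its delete through ``test_utils.call_and_ignore_notfound_exc``, a test that already removed the resource itself does not make the class-level cleanup fail. A rough, self-contained approximation of that wrapper (the real one is ``tempest.lib.common.utils.test_utils.call_and_ignore_notfound_exc``; ``NotFound`` stands in for ``tempest.lib.exceptions.NotFound``)::

    class NotFound(Exception):
        """Stand-in for the NotFound exception raised by REST clients."""


    def call_and_ignore_notfound_exc(func, *args, **kwargs):
        # Issue the delete call, but treat "already gone" as success.
        try:
            return func(*args, **kwargs)
        except NotFound:
            pass


    existing = {'net-1'}


    def delete_network(network_id):
        if network_id not in existing:
            raise NotFound(network_id)
        existing.remove(network_id)


    call_and_ignore_notfound_exc(delete_network, 'net-1')  # deletes the network
    call_and_ignore_notfound_exc(delete_network, 'net-1')  # already gone: no error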
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 1c59556..7345fd1 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -104,15 +104,6 @@
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
expected, exclude_keys))
- def _delete_network(self, network):
- # Deleting network also deletes its subnets if exists
- self.networks_client.delete_network(network['id'])
- if network in self.networks:
- self.networks.remove(network)
- for subnet in self.subnets:
- if subnet['network_id'] == network['id']:
- self.subnets.remove(subnet)
-
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
**kwargs):
network = self.create_network()
@@ -132,8 +123,6 @@
self._compare_resource_attrs(subnet, compare_args)
self.networks_client.delete_network(net_id)
- self.networks.pop()
- self.subnets.pop()
class NetworksTest(BaseNetworkTestResources):
@@ -171,7 +160,7 @@
def test_create_update_delete_network_subnet(self):
# Create a network
network = self.create_network()
- self.addCleanup(self._delete_network, network)
+ self.addCleanup(self.networks_client.delete_network, network['id'])
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
@@ -280,7 +269,7 @@
network = self.create_network()
net_id = network['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_network, network)
+ self.networks_client.delete_network, network['id'])
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
@@ -324,7 +313,7 @@
@decorators.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
- self.addCleanup(self._delete_network, network)
+ self.addCleanup(self.networks_client.delete_network, network['id'])
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
@@ -622,7 +611,6 @@
port = self.create_port(slaac_network)
self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
self.subnets_client.delete_subnet(subnet_slaac['id'])
- self.subnets.pop()
subnets = self.subnets_client.list_subnets()
subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
self.assertNotIn(subnet_slaac['id'], subnet_ids,
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index c9ce55c..ddd7d3a 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -84,6 +84,8 @@
def test_router_remove_interface_in_use_returns_409(self):
self.routers_client.add_router_interface(self.router['id'],
subnet_id=self.subnet['id'])
+ self.addCleanup(self.routers_client.remove_router_interface,
+ self.router['id'], subnet_id=self.subnet['id'])
self.assertRaises(lib_exc.Conflict,
self.routers_client.delete_router,
self.router['id'])
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index ee72163..e8f3f8b 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -36,10 +36,14 @@
using HA proxy sync the deletion properly, otherwise, the container
might fail to be deleted because it's not empty.
- :param containers: List of containers to be deleted
+ :param containers: List of containers (or a single container name)
+ to be deleted
:param container_client: Client to be used to delete containers
:param object_client: Client to be used to delete objects
"""
+ if isinstance(containers, str):
+ containers = [containers]
+
for cont in containers:
try:
params = {'limit': 9999, 'format': 'json'}
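With the ``isinstance`` guard, ``delete_containers`` accepts either a list of names or one container name; the account, sync and versioning hunks below rely on both call styles. A tiny self-contained sketch of that normalisation (the helper name is reused only for illustration)::

    def _normalize_containers(containers):
        # Mirrors the guard added above: a bare name becomes a one-item list.
        if isinstance(containers, str):
            containers = [containers]
        return containers


    assert _normalize_containers('cont-a') == ['cont-a']
    assert _normalize_containers(['cont-a', 'cont-b']) == ['cont-a', 'cont-b']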
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index d7c85a2..c5c30e3 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -44,14 +44,13 @@
for i in range(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % six.int2byte(i))
cls.container_client.update_container(name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ [name],
+ cls.container_client,
+ cls.object_client)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(AccountTest, cls).resource_cleanup()
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
def test_list_containers(self):
@@ -242,7 +241,7 @@
@decorators.idempotent_id('365e6fc7-1cfe-463b-a37c-8bd08d47b6aa')
def test_list_containers_with_prefix(self):
# list containers that have a name that starts with a prefix
- prefix = '{0}-a'.format(CONF.resources_prefix)
+ prefix = 'tempest-a'
params = {'prefix': prefix}
resp, container_list = self.account_client.list_account_containers(
params=params)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 042d288..322579c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -33,8 +33,6 @@
class ContainerSyncTest(base.BaseObjectTest):
- clients = {}
-
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@@ -54,6 +52,7 @@
super(ContainerSyncTest, cls).resource_setup()
cls.containers = []
cls.objects = []
+ cls.clients = {}
# Default container-server config only allows localhost
cls.local_ip = '127.0.0.1'
@@ -72,14 +71,12 @@
(cls.container_client_alt, cls.object_client_alt)
for cont_name, client in cls.clients.items():
client[0].create_container(cont_name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ cont_name,
+ client[0],
+ client[1])
cls.containers.append(cont_name)
- @classmethod
- def resource_cleanup(cls):
- for client in cls.clients.values():
- cls.delete_containers(client[0], client[1])
- super(ContainerSyncTest, cls).resource_cleanup()
-
def _test_container_synchronization(self, make_headers):
# container to container synchronization
# to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 51b0a1d..75111b6 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -24,16 +24,6 @@
class ContainerTest(base.BaseObjectTest):
- @classmethod
- def resource_setup(cls):
- super(ContainerTest, cls).resource_setup()
- cls.containers = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(ContainerTest, cls).resource_cleanup()
-
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
self.assertHeaders(resp, 'Container', 'HEAD')
@@ -52,7 +42,10 @@
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
resp, _ = self.container_client.update_container(vers_container_name)
- self.containers.append(vers_container_name)
+ self.addCleanup(base.delete_containers,
+ [vers_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
@@ -61,7 +54,10 @@
resp, _ = self.container_client.update_container(
base_container_name,
**headers)
- self.containers.append(base_container_name)
+ self.addCleanup(base.delete_containers,
+ [base_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
new file mode 100644
index 0000000..45f4caa
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -0,0 +1,202 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseGroupSnapshotsTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
+ def _create_group_snapshot(self, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group_Snapshot')
+
+ group_snapshot = self.group_snapshots_client.create_group_snapshot(
+ **kwargs)['group_snapshot']
+ group_snapshot['group_id'] = kwargs['group_id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_group_snapshot, group_snapshot)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], 'available')
+ return group_snapshot
+
+ def _delete_group_snapshot(self, group_snapshot):
+ self.group_snapshots_client.delete_group_snapshot(group_snapshot['id'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for vol in vols:
+ for snap in snapshots:
+ if (vol['group_id'] == group_snapshot['group_id'] and
+ vol['id'] == snap['volume_id']):
+ self.snapshots_client.wait_for_resource_deletion(
+ snap['id'])
+ self.group_snapshots_client.wait_for_resource_deletion(
+ group_snapshot['id'])
+
+
+class GroupSnapshotsTest(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.14'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
+ def test_group_snapshot_create_show_list_delete(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get a given group snapshot
+ group_snapshot = self.group_snapshots_client.show_group_snapshot(
+ group_snapshot['id'])['group_snapshot']
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get all group snapshots with details, check some detail-specific
+ # elements, and look for the created group snapshot
+ group_snapshots = self.group_snapshots_client.list_group_snapshots(
+ detail=True)['group_snapshots']
+ for grp_snapshot in group_snapshots:
+ self.assertIn('created_at', grp_snapshot)
+ self.assertIn('group_id', grp_snapshot)
+ self.assertIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
+
+ # Delete group snapshot
+ self._delete_group_snapshot(group_snapshot)
+ group_snapshots = self.group_snapshots_client.list_group_snapshots()[
+ 'group_snapshots']
+ self.assertEmpty(group_snapshots)
+
+ @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
+ def test_create_group_from_group_snapshot(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group_snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Create Group from Group snapshot
+ grp_name2 = data_utils.rand_name('Group_from_snap')
+ grp2 = self.groups_client.create_group_from_source(
+ group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, vol['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp2['id'], 'available')
+
+
+class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.19'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
+ def test_reset_group_snapshot_status(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=group['id'])
+
+ # Create group snapshot
+ group_snapshot = self._create_group_snapshot(group_id=group['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if volume['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Reset group snapshot status
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.group_snapshots_client,
+ group_snapshot['id'], 'available')
+ self.addCleanup(
+ self.admin_group_snapshots_client.reset_group_snapshot_status,
+ group_snapshot['id'], 'available')
+ for status in ['creating', 'available', 'error']:
+ self.admin_group_snapshots_client.reset_group_snapshot_status(
+ group_snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], status)
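Both ``_delete_group_snapshot`` and the status checks in this new file rely on polling waiters rather than fixed sleeps: an operation is issued, then the test loops until the resource reaches the expected state or disappears. A self-contained sketch of that wait-until-gone idea (timeout values and the fake store are illustrative only)::

    import time


    def wait_for_resource_deletion(is_gone, timeout=5.0, interval=0.1):
        """Poll is_gone() until it returns True or the timeout expires."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if is_gone():
                return
            time.sleep(interval)
        raise RuntimeError('resource was not deleted within %ss' % timeout)


    snapshots = {'snap-1': 'deleting'}


    def backend_finishes_delete():
        snapshots.pop('snap-1', None)  # what the backend would eventually do


    backend_finishes_delete()
    wait_for_resource_deletion(lambda: 'snap-1' not in snapshots)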
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
new file mode 100644
index 0000000..c5e6d1a
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -0,0 +1,80 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
+ _api_version = 3
+ min_microversion = '3.11'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
+ def test_group_type_specs_create_show_update_list_delete(self):
+ # Create new group type
+ group_type = self.create_group_type()
+
+ # Create new group type specs
+ create_specs = {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ body = self.admin_group_types_client.create_or_update_group_type_specs(
+ group_type['id'], create_specs)['group_specs']
+ self.assertEqual(create_specs, body)
+
+ # Create a new group type spec and update an existing group type spec
+ update_specs = {
+ "key2": "value2-updated",
+ "key3": "value3"
+ }
+ body = self.admin_group_types_client.create_or_update_group_type_specs(
+ group_type['id'], update_specs)['group_specs']
+ self.assertEqual(update_specs, body)
+
+ # Show specified item of group type specs
+ spec_keys = ['key2', 'key3']
+ for key in spec_keys:
+ body = self.admin_group_types_client.show_group_type_specs_item(
+ group_type['id'], key)
+ self.assertIn(key, body)
+ self.assertEqual(update_specs[key], body[key])
+
+ # Update specified item of group type specs
+ update_key = 'key3'
+ update_spec = {update_key: "value3-updated"}
+ body = self.admin_group_types_client.update_group_type_specs_item(
+ group_type['id'], update_key, update_spec)
+ self.assertEqual(update_spec, body)
+
+ # List all group type specs that created or updated above
+ list_specs = {}
+ list_specs.update(create_specs)
+ list_specs.update(update_specs)
+ list_specs.update(update_spec)
+ body = self.admin_group_types_client.list_group_type_specs(
+ group_type['id'])['group_specs']
+ self.assertEqual(list_specs, body)
+
+ # Delete specified item of group type specs
+ delete_key = 'key1'
+ self.admin_group_types_client.delete_group_type_specs_item(
+ group_type['id'], delete_key)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.admin_group_types_client.show_group_type_specs_item,
+ group_type['id'], delete_key)
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 0df5fbd..6723207 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -24,7 +24,7 @@
max_microversion = 'latest'
@decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
- def test_group_type_create_list_show(self):
+ def test_group_type_create_list_update_show(self):
# Create/list/show group type.
name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
description = data_utils.rand_name("group-type-description")
@@ -46,8 +46,19 @@
self.assertIsInstance(group_list, list)
self.assertNotEmpty(group_list)
+ update_params = {
+ 'name': data_utils.rand_name(
+ self.__class__.__name__ + '-updated-group-type'),
+ 'description': 'updated-group-type-desc'
+ }
+ updated_group_type = self.admin_group_types_client.update_group_type(
+ body['id'], **update_params)['group_type']
+ for key, expected_val in update_params.items():
+ self.assertEqual(expected_val, updated_group_type[key])
+
fetched_group_type = self.admin_group_types_client.show_group_type(
body['id'])['group_type']
+ params.update(update_params) # Add updated params to original params.
for key in params.keys():
self.assertEqual(params[key], fetched_group_type[key],
'%s of the fetched group_type is different '
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 6b53d85..2f6eb6b 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -17,54 +17,14 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
-class BaseGroupsTest(base.BaseVolumeAdminTest):
-
- def _delete_group(self, grp_id, delete_volumes=True):
- self.groups_client.delete_group(grp_id, delete_volumes)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp_id:
- self.volumes_client.wait_for_resource_deletion(vol['id'])
- self.groups_client.wait_for_resource_deletion(grp_id)
-
- def _delete_group_snapshot(self, group_snapshot_id, grp_id):
- self.group_snapshots_client.delete_group_snapshot(group_snapshot_id)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for vol in vols:
- for snap in snapshots:
- if (vol['group_id'] == grp_id and
- vol['id'] == snap['volume_id']):
- self.snapshots_client.wait_for_resource_deletion(
- snap['id'])
- self.group_snapshots_client.wait_for_resource_deletion(
- group_snapshot_id)
-
- def _create_group(self, group_type, volume_type, grp_name=None):
- if not grp_name:
- grp_name = data_utils.rand_name('Group')
- grp = self.groups_client.create_group(
- group_type=group_type['id'],
- volume_types=[volume_type['id']],
- name=grp_name)['group']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group, grp['id'])
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp['id'], 'available')
- self.assertEqual(grp_name, grp['name'])
- return grp
-
-
-class GroupsTest(BaseGroupsTest):
+class GroupsTest(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.14'
+ min_microversion = '3.13'
max_microversion = 'latest'
@decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
@@ -77,13 +37,15 @@
# Create group
grp1_name = data_utils.rand_name('Group1')
- grp1 = self._create_group(group_type, volume_type,
- grp_name=grp1_name)
+ grp1 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp1_name)
grp1_id = grp1['id']
grp2_name = data_utils.rand_name('Group2')
- grp2 = self._create_group(group_type, volume_type,
- grp_name=grp2_name)
+ grp2 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp2_name)
grp2_id = grp2['id']
# Create volume
@@ -125,143 +87,12 @@
# Delete group
# grp1 has a volume so delete_volumes flag is set to True by default
- self._delete_group(grp1_id)
+ self.delete_group(grp1_id)
# grp2 is empty so delete_volumes flag can be set to False
- self._delete_group(grp2_id, delete_volumes=False)
+ self.delete_group(grp2_id, delete_volumes=False)
grps = self.groups_client.list_groups(detail=True)['groups']
self.assertEmpty(grps)
- @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
- def test_group_snapshot_create_show_list_delete(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get a given group snapshot
- group_snapshot = self.group_snapshots_client.show_group_snapshot(
- group_snapshot['id'])['group_snapshot']
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get all group snapshots with details, check some detail-specific
- # elements, and look for the created group snapshot
- group_snapshots = (self.group_snapshots_client.list_group_snapshots(
- detail=True)['group_snapshots'])
- for grp_snapshot in group_snapshots:
- self.assertIn('created_at', grp_snapshot)
- self.assertIn('group_id', grp_snapshot)
- self.assertIn((group_snapshot['name'], group_snapshot['id']),
- [(m['name'], m['id']) for m in group_snapshots])
-
- # Delete group snapshot
- self._delete_group_snapshot(group_snapshot['id'], grp['id'])
- group_snapshots = (self.group_snapshots_client.list_group_snapshots()
- ['group_snapshots'])
- self.assertEmpty(group_snapshots)
-
- @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
- def test_create_group_from_group_snapshot(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group_snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Create Group from Group snapshot
- grp_name2 = data_utils.rand_name('Group_from_snap')
- grp2 = self.groups_client.create_group_from_source(
- group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
- @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
- def test_create_group_from_group(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
-
- # Create Group from Group
- grp_name2 = data_utils.rand_name('Group_from_grp')
- grp2 = self.groups_client.create_group_from_source(
- source_group_id=grp['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
@decorators.idempotent_id('4a8a6fd2-8b3b-4641-8f54-6a6f99320006')
def test_group_update(self):
# Create volume type
@@ -271,7 +102,8 @@
group_type = self.create_group_type()
# Create Group
- grp = self._create_group(group_type, volume_type)
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volumes
grp_vols = []
@@ -317,56 +149,42 @@
self.assertEqual(2, len(grp_vols))
-class GroupsV319Test(BaseGroupsTest):
+class GroupsV314Test(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.19'
+ min_microversion = '3.14'
max_microversion = 'latest'
- @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
- def test_reset_group_snapshot_status(self):
+ @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
+ def test_create_group_from_group(self):
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
- # Create group
- group = self._create_group(group_type, volume_type)
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volume
- volume = self.create_volume(volume_type=volume_type['id'],
- group_id=group['id'])
+ self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (self.group_snapshots_client.create_group_snapshot(
- group_id=group['id'], name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], group['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if volume['id'] == snap['volume_id']:
+ # Create Group from Group
+ grp_name2 = data_utils.rand_name('Group_from_grp')
+ grp2 = self.groups_client.create_group_from_source(
+ source_group_id=grp['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
+ self.volumes_client, vol['id'], 'available')
waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Reset group snapshot status
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.addCleanup(
- self.admin_group_snapshots_client.reset_group_snapshot_status,
- group_snapshot['id'], 'available')
- for status in ['creating', 'available', 'error']:
- self.admin_group_snapshots_client.reset_group_snapshot_status(
- group_snapshot['id'], status)
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], status)
+ self.groups_client, grp2['id'], 'available')
-class GroupsV320Test(BaseGroupsTest):
+class GroupsV320Test(base.BaseVolumeAdminTest):
_api_version = 3
min_microversion = '3.20'
max_microversion = 'latest'
@@ -380,7 +198,8 @@
group_type = self.create_group_type()
# Create group
- group = self._create_group(group_type, volume_type)
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Reset group status
self.addCleanup(waiters.wait_for_volume_resource_status,
diff --git a/tempest/api/volume/admin/test_snapshot_manage.py b/tempest/api/volume/admin/test_snapshot_manage.py
index 9ff7160..37a47ec 100644
--- a/tempest/api/volume/admin/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/test_snapshot_manage.py
@@ -35,6 +35,9 @@
def skip_checks(cls):
super(SnapshotManageAdminTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
if not CONF.volume_feature_enabled.manage_snapshot:
raise cls.skipException("Manage snapshot tests are disabled")
@@ -60,7 +63,7 @@
# Verify the original snapshot does not exist in snapshot list
params = {'all_tenants': 1}
all_snapshots = self.admin_snapshots_client.list_snapshots(
- detail=True, params=params)['snapshots']
+ detail=True, **params)['snapshots']
self.assertNotIn(snapshot['id'], [v['id'] for v in all_snapshots])
# Manage the snapshot
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 42bfcd6..6f9daa8 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -38,7 +38,6 @@
def setup_credentials(cls):
super(BaseVolumeQuotasAdminTestJSON, cls).setup_credentials()
cls.demo_tenant_id = cls.os_primary.credentials.tenant_id
- cls.alt_client = cls.os_alt.volumes_client_latest
@classmethod
def setup_clients(cls):
@@ -150,7 +149,8 @@
self.demo_tenant_id, params={'usage': True})['quota_set']
alt_quota = self.admin_quotas_client.show_quota_set(
- self.alt_client.tenant_id, params={'usage': True})['quota_set']
+ self.os_alt.volumes_client_latest.tenant_id,
+ params={'usage': True})['quota_set']
# Creates a volume transfer
transfer = self.transfer_client.create_volume_transfer(
@@ -164,14 +164,15 @@
# Verify volume transferred is available
waiters.wait_for_volume_resource_status(
- self.alt_client, volume['id'], 'available')
+ self.os_alt.volumes_client_latest, volume['id'], 'available')
# List of tenants quota usage post transfer
new_primary_quota = self.admin_quotas_client.show_quota_set(
self.demo_tenant_id, params={'usage': True})['quota_set']
new_alt_quota = self.admin_quotas_client.show_quota_set(
- self.alt_client.tenant_id, params={'usage': True})['quota_set']
+ self.os_alt.volumes_client_latest.tenant_id,
+ params={'usage': True})['quota_set']
# Verify tenants quota usage was updated
self.assertEqual(primary_quota['volumes']['in_use'] -
diff --git a/tempest/api/volume/admin/test_volume_type_access.py b/tempest/api/volume/admin/test_volume_type_access.py
index e93bcb5..b64face 100644
--- a/tempest/api/volume/admin/test_volume_type_access.py
+++ b/tempest/api/volume/admin/test_volume_type_access.py
@@ -27,11 +27,6 @@
credentials = ['primary', 'alt', 'admin']
- @classmethod
- def setup_clients(cls):
- super(VolumeTypesAccessTest, cls).setup_clients()
- cls.alt_client = cls.os_alt.volumes_client_latest
-
@decorators.idempotent_id('d4dd0027-835f-4554-a6e5-50903fb79184')
def test_volume_type_access_add(self):
# Creating a NON public volume type
@@ -70,10 +65,11 @@
# Adding volume type access for alt tenant
self.admin_volume_types_client.add_type_access(
- volume_type['id'], project=self.alt_client.tenant_id)
+ volume_type['id'],
+ project=self.os_alt.volumes_client_latest.tenant_id)
self.addCleanup(self.admin_volume_types_client.remove_type_access,
volume_type['id'],
- project=self.alt_client.tenant_id)
+ project=self.os_alt.volumes_client_latest.tenant_id)
# List tenant access for the given volume type
type_access_list = self.admin_volume_types_client.list_type_access(
@@ -88,5 +84,5 @@
# Validating the permitted tenants are the expected tenants
self.assertIn(self.volumes_client.tenant_id,
map(operator.itemgetter('project_id'), type_access_list))
- self.assertIn(self.alt_client.tenant_id,
+ self.assertIn(self.os_alt.volumes_client_latest.tenant_id,
map(operator.itemgetter('project_id'), type_access_list))
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index cfe2068..0a6b79d 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -211,6 +211,27 @@
self.servers_client.delete_server, body['id'])
return body
+ def create_group(self, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group')
+
+ group = self.groups_client.create_group(**kwargs)['group']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.delete_group, group['id'])
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, group['id'], 'available')
+ return group
+
+ def delete_group(self, group_id, delete_volumes=True):
+ self.groups_client.delete_group(group_id, delete_volumes)
+ if delete_volumes:
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == group_id:
+ self.volumes_client.wait_for_resource_deletion(vol['id'])
+ self.groups_client.wait_for_resource_deletion(group_id)
+
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index de28a30..54052ae 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -18,6 +18,7 @@
import testtools
from tempest.api.volume import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
@@ -79,11 +80,6 @@
# is implicit - Cinder calls Nova at that microversion, Tempest does not.
min_microversion = '3.42'
- @classmethod
- def setup_clients(cls):
- super(VolumesExtendAttachedTest, cls).setup_clients()
- cls.admin_servers_client = cls.os_admin.servers_client
-
def _find_extend_volume_instance_action(self, server_id):
actions = self.servers_client.list_instance_actions(
server_id)['instanceActions']
@@ -94,7 +90,7 @@
def _find_extend_volume_instance_action_finish_event(self, action):
# This has to be called by an admin client otherwise
# the events don't show up.
- action = self.admin_servers_client.show_instance_action(
+ action = self.os_admin.servers_client.show_instance_action(
action['instance_uuid'], action['request_id'])['instanceAction']
for event in action['events']:
if (event['event'] == 'compute_extend_volume' and
@@ -104,6 +100,7 @@
@decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
@testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
"Attached volume extend is disabled.")
+ @utils.services('compute')
def test_extend_attached_volume(self):
"""This is a happy path test which does the following:
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index b5f98ea..d5358ab 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -26,15 +26,28 @@
class VolumesListTestJSON(base.BaseVolumeTest):
- # NOTE: This test creates a number of 1G volumes. To run successfully,
- # ensure that the backing file for the volume group that Nova uses
+ # NOTE: This test creates a number of 1G volumes. To run it successfully,
+ # ensure that the backing file for the volume group that Cinder uses
# has space for at least 3 1G volumes!
# If you are running a Devstack environment, ensure that the
# VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
VOLUME_FIELDS = ('id', 'name')
- def assertVolumesIn(self, fetched_list, expected_list, fields=None):
+ @classmethod
+ def _remove_volatile_fields(cls, fetched_list):
+ """Remove fields that should not be compared.
+
+ This method makes sure that Tempest does not compare e.g.
+ the volume's "updated_at" field that may change for any reason
+ internal to the operation of Cinder.
+ """
+ for volume in fetched_list:
+ for field in ('updated_at',):
+ if field in volume:
+ del volume[field]
+
+ def _assert_volumes_in(self, fetched_list, expected_list, fields=None):
"""Check out the list.
This function is aim at check out whether all of the volumes in
@@ -45,6 +58,8 @@
expected_list = map(fieldsgetter, expected_list)
fetched_list = [fieldsgetter(item) for item in fetched_list]
+ # Hopefully the expected_list has already been cleaned.
+ self._remove_volatile_fields(fetched_list)
missing_vols = [v for v in expected_list if v not in fetched_list]
if not missing_vols:
return
@@ -72,6 +87,7 @@
volume = cls.volumes_client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
+ cls._remove_volatile_fields(cls.volume_list)
def _list_by_param_value_and_assert(self, params, with_detail=False):
"""list or list_details with given params and validates result"""
@@ -103,15 +119,15 @@
# Get a list of Volumes
# Fetch all volumes
fetched_list = self.volumes_client.list_volumes()['volumes']
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('adcbb5a7-5ad8-4b61-bd10-5380e111a877')
def test_volume_list_with_details(self):
# Get a list of Volumes with details
# Fetch all Volumes
fetched_list = self.volumes_client.list_volumes(detail=True)['volumes']
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('a28e8da4-0b56-472f-87a8-0f4d3f819c02')
def test_volume_list_by_name(self):
@@ -137,8 +153,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('2943f712-71ec-482a-bf49-d5ca06216b9f')
def test_volumes_list_details_by_status(self):
@@ -147,7 +163,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual('available', volume['status'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('2016a942-3020-40d7-95ce-7613bf8407ce')
def test_volumes_list_by_bootable(self):
@@ -160,8 +176,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('2016a939-72ec-482a-bf49-d5ca06216b9f')
def test_volumes_list_details_by_bootable(self):
@@ -170,7 +186,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual('false', volume['bootable'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('c0cfa863-3020-40d7-b587-e35f597d5d87')
def test_volumes_list_by_availability_zone(self):
@@ -180,8 +196,8 @@
fetched_list = self.volumes_client.list_volumes(
params=params)['volumes']
self._list_by_param_value_and_assert(params)
- self.assertVolumesIn(fetched_list, self.volume_list,
- fields=self.VOLUME_FIELDS)
+ self._assert_volumes_in(fetched_list, self.volume_list,
+ fields=self.VOLUME_FIELDS)
@decorators.idempotent_id('e1b80d13-94f0-4ba2-a40e-386af29f8db1')
def test_volumes_list_details_by_availability_zone(self):
@@ -192,7 +208,7 @@
detail=True, params=params)['volumes']
for volume in fetched_list:
self.assertEqual(zone, volume['availability_zone'])
- self.assertVolumesIn(fetched_list, self.volume_list)
+ self._assert_volumes_in(fetched_list, self.volume_list)
@decorators.idempotent_id('b5ebea1b-0603-40a0-bb41-15fcd0a53214')
def test_volume_list_with_param_metadata(self):
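The renamed ``_assert_volumes_in`` drops volatile fields before comparing, so listings are matched only on data the test controls. A small self-contained sketch of that comparison style, using ``operator.itemgetter`` as the test does (the sample volumes are made up)::

    import operator


    def _remove_volatile_fields(fetched_list):
        # Drop fields such as 'updated_at' that Cinder may change on its own.
        for volume in fetched_list:
            volume.pop('updated_at', None)


    def _assert_volumes_in(fetched_list, expected_list, fields=None):
        _remove_volatile_fields(fetched_list)
        if fields:
            getter = operator.itemgetter(*fields)
            expected_list = [getter(v) for v in expected_list]
            fetched_list = [getter(v) for v in fetched_list]
        missing = [v for v in expected_list if v not in fetched_list]
        assert not missing, 'volumes missing from the listing: %s' % missing


    expected = [{'id': 'vol-1', 'name': 'one'}]
    fetched = [{'id': 'vol-1', 'name': 'one',
                'updated_at': '2017-01-01T00:00:00'}]
    _assert_volumes_in(fetched, expected)                         # full compare
    _assert_volumes_in(fetched, expected, fields=('id', 'name'))  # field compare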
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index c2f8627..1c671ec 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -15,18 +15,18 @@
# under the License.
"""
-Utility for creating **accounts.yaml** file for concurrent test runs.
+Utility for creating ``accounts.yaml`` file for concurrent test runs.
Creates one primary user, one alt user, one swift admin, one stack owner
and one admin (optionally) for each concurrent thread. The utility creates
-user for each tenant. The **accounts.yaml** file will be valid and contain
+a user for each tenant. The ``accounts.yaml`` file will be valid and contain
credentials for the created users, so each user will be in a separate tenant and
have the username, tenant_name, password and roles.
-**Usage:** ``tempest account-generator [-h] [OPTIONS] accounts_file.yaml``.
+**Usage:** ``tempest account-generator [-h] [OPTIONS] accounts_file.yaml``
Positional Arguments
--------------------
-**accounts_file.yaml** (Required) Provide an output accounts yaml file. Utility
+``accounts_file.yaml`` (Required) Provide an output accounts yaml file. Utility
creates a .yaml file in the directory where the command is run. The appropriate
name for the file is *accounts.yaml* and it should be placed in the *tempest/etc*
directory.
@@ -40,62 +40,62 @@
You're probably familiar with these, but just to remind:
-======== ======================== ====================
-Param CLI Environment Variable
-======== ======================== ====================
-Username --os-username OS_USERNAME
-Password --os-password OS_PASSWORD
-Project --os-project-name OS_PROJECT_NAME
-Tenant --os-tenant-name (depr.) OS_TENANT_NAME
-Domain --os-domain-name OS_DOMAIN_NAME
-======== ======================== ====================
+======== ============================ ====================
+Param CLI Environment Variable
+======== ============================ ====================
+Username ``--os-username`` OS_USERNAME
+Password ``--os-password`` OS_PASSWORD
+Project ``--os-project-name`` OS_PROJECT_NAME
+Tenant ``--os-tenant-name`` (depr.) OS_TENANT_NAME
+Domain ``--os-domain-name`` OS_DOMAIN_NAME
+======== ============================ ====================
Optional Arguments
------------------
-**-h**, **--help** (Optional) Shows help message with the description of
-utility and its arguments, and exits.
+* ``-h, --help`` (Optional) Shows help message with the description of
+ utility and its arguments, and exits.
-**-c /etc/tempest.conf**, **--config-file /etc/tempest.conf** (Optional) Path
-to tempest config file. If not specified, it searches for tempest.conf in these
-locations:
+* ``-c, --config-file /etc/tempest.conf`` (Optional) Path
+ to tempest config file. If not specified, it searches for tempest.conf in
+ these locations:
-- ./etc/
-- /etc/tempest
-- ~/.tempest/
-- ~/
-- /etc/
+ - ./etc/
+ - /etc/tempest
+ - ~/.tempest/
+ - ~/
+ - /etc/
-**--os-username <auth-user-name>** (Optional) Name used for authentication with
-the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User should
-have permissions to create new user accounts and tenants.
+* ``--os-username <auth-user-name>`` (Optional) Name used for authentication
+ with the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User
+ should have permissions to create new user accounts and tenants.
-**--os-password <auth-password>** (Optional) Password used for authentication
-with the OpenStack Identity service. Defaults to env[OS_PASSWORD].
+* ``--os-password <auth-password>`` (Optional) Password used for authentication
+ with the OpenStack Identity service. Defaults to env[OS_PASSWORD].
-**--os-project-name <auth-project-name>** (Optional) Project to request
-authorization on. Defaults to env[OS_PROJECT_NAME].
+* ``--os-project-name <auth-project-name>`` (Optional) Project to request
+ authorization on. Defaults to env[OS_PROJECT_NAME].
-**--os-tenant-name <auth-tenant-name>** (Optional, deprecated) Tenant to
-request authorization on. Defaults to env[OS_TENANT_NAME].
+* ``--os-tenant-name <auth-tenant-name>`` (Optional, deprecated) Tenant to
+ request authorization on. Defaults to env[OS_TENANT_NAME].
-**--os-domain-name <auth-domain-name>** (Optional) Domain the user and project
-belong to. Defaults to env[OS_DOMAIN_NAME].
+* ``--os-domain-name <auth-domain-name>`` (Optional) Domain the user and
+ project belong to. Defaults to env[OS_DOMAIN_NAME].
-**--tag TAG** (Optional) Resources tag. Each created resource (user, project)
-will have the prefix with the given TAG in its name. Using tag is recommended
-for the further using, cleaning resources.
+* ``--tag TAG`` (Optional) Resources tag. Each created resource (user, project)
+  will have the given TAG as a prefix in its name. Using a tag is recommended
+  for later identification and clean-up of resources.
-**-r CONCURRENCY**, **--concurrency CONCURRENCY** (Optional) Concurrency count
-(default: 1). The number of accounts required can be estimated as
-CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
-a different tenant. This is required to provide isolation between test for
-running in parallel.
+* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count
+  (default: 1). The number of accounts required can be estimated as
+  CONCURRENCY x 2. Each user provided in the *accounts.yaml* file will be in
+  a different tenant. This is required to provide isolation between tests
+  running in parallel.
-**--with-admin** (Optional) Creates admin for each concurrent group
-(default: False).
+* ``--with-admin`` (Optional) Creates admin for each concurrent group
+ (default: False).
-**-i VERSION**, **--identity-version VERSION** (Optional) Provisions accounts
-using the specified version of the identity API. (default: '3').
+* ``-i, --identity-version VERSION`` (Optional) Provisions accounts
+ using the specified version of the identity API. (default: '3').
To see help on specific argument, please do: ``tempest account-generator
[OPTIONS] <accounts_file.yaml> -h``.
@@ -162,9 +162,7 @@
if CONF.service_available.swift:
spec.append([CONF.object_storage.operator_role])
spec.append([CONF.object_storage.reseller_admin_role])
- if CONF.service_available.heat:
- spec.append([CONF.orchestration.stack_owner_role,
- CONF.object_storage.operator_role])
+ spec.append([CONF.object_storage.operator_role])
if admin:
spec.append('admin')
resources = []
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index d0aa7dc..29abd49 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -28,45 +28,48 @@
Example Run
-----------
-**WARNING: If step 1 is skipped in the example below, the cleanup procedure
-may delete resources that existed in the cloud before the test run. This
-may cause an unwanted destruction of cloud resources, so use caution with
-this command.**
+.. warning::
-``$ tempest cleanup --init-saved-state``
+ If step 1 is skipped in the example below, the cleanup procedure
+ may delete resources that existed in the cloud before the test run. This
+ may cause an unwanted destruction of cloud resources, so use caution with
+ this command.
-``$ # Actual running of Tempest tests``
+ Examples::
-``$ tempest cleanup``
+ $ tempest cleanup --init-saved-state
+ $ # Actual running of Tempest tests
+ $ tempest cleanup
Runtime Arguments
-----------------
-**--init-saved-state**: Initializes the saved state of the OpenStack deployment
-and will output a ``saved_state.json`` file containing resources from your
-deployment that will be preserved from the cleanup command. This should be
-done prior to running Tempest tests.
+* ``--init-saved-state``: Initializes the saved state of the OpenStack
+ deployment and will output a ``saved_state.json`` file containing resources
+ from your deployment that will be preserved from the cleanup command. This
+ should be done prior to running Tempest tests.
-**--delete-tempest-conf-objects**: If option is present, then the command will
-delete the admin project in addition to the resources associated with them on
-clean up. If option is not present, the command will delete the resources
-associated with the Tempest and alternate Tempest users and projects but will
-not delete the projects themselves.
+* ``--delete-tempest-conf-objects``: If option is present, then the command
+ will delete the admin project in addition to the resources associated with
+ them on clean up. If option is not present, the command will delete the
+ resources associated with the Tempest and alternate Tempest users and
+ projects but will not delete the projects themselves.
-**--dry-run**: Creates a report (``./dry_run.json``) of the projects that will
-be cleaned up (in the ``_projects_to_clean`` dictionary [1]_) and the global
-objects that will be removed (domains, flavors, images, roles, projects,
-and users). Once the cleanup command is executed (e.g. run without
-parameters), running it again with **--dry-run** should yield an empty report.
+* ``--dry-run``: Creates a report (``./dry_run.json``) of the projects that
+ will be cleaned up (in the ``_projects_to_clean`` dictionary [1]_) and the
+ global objects that will be removed (domains, flavors, images, roles,
+ projects, and users). Once the cleanup command is executed (e.g. run without
+ parameters), running it again with ``--dry-run`` should yield an empty
+ report.
-**--help**: Print the help text for the command and parameters.
+* ``--help``: Print the help text for the command and parameters.
.. [1] The ``_projects_to_clean`` dictionary in ``dry_run.json`` lists the
projects that ``tempest cleanup`` will loop through to delete child
objects, but the command will, by default, not delete the projects
themselves. This may differ from the ``projects`` list as you can clean
the Tempest and alternate Tempest users and projects but they will not be
- deleted unless the **--delete-tempest-conf-objects** flag is used to
+ deleted unless the ``--delete-tempest-conf-objects`` flag is used to
force their deletion.
"""
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index d1e80f1..025959a 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -37,7 +37,6 @@
IS_CINDER = None
IS_GLANCE = None
-IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
@@ -60,7 +59,6 @@
IS_CINDER = CONF.service_available.cinder
IS_GLANCE = CONF.service_available.glance
- IS_HEAT = CONF.service_available.heat
IS_NEUTRON = CONF.service_available.neutron
IS_NOVA = CONF.service_available.nova
@@ -212,33 +210,6 @@
self.data['server_groups'] = sgs
-class StackService(BaseService):
- def __init__(self, manager, **kwargs):
- super(StackService, self).__init__(kwargs)
- params = config.service_client_config('orchestration')
- self.client = manager.orchestration.OrchestrationClient(
- manager.auth_provider, **params)
-
- def list(self):
- client = self.client
- stacks = client.list_stacks()['stacks']
- LOG.debug("List count, %s Stacks", len(stacks))
- return stacks
-
- def delete(self):
- client = self.client
- stacks = self.list()
- for stack in stacks:
- try:
- client.delete_stack(stack['id'])
- except Exception:
- LOG.exception("Delete Stack exception.")
-
- def dry_run(self):
- stacks = self.list()
- self.data['stacks'] = stacks
-
-
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
super(KeyPairService, self).__init__(kwargs)
@@ -960,8 +931,6 @@
if not IS_NEUTRON:
project_services.append(FloatingIpService)
project_services.append(NovaQuotaService)
- if IS_HEAT:
- project_services.append(StackService)
if IS_NEUTRON:
project_services.append(NetworkFloatingIpService)
if utils.is_extension_enabled('metering', 'network'):
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index f07f197..6435717 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -19,11 +19,11 @@
==============
Tempest run has several options:
- * **--regex/-r**: This is a selection regex like what testr uses. It will run
- any tests that match on re.match() with the regex
- * **--smoke/-s**: Run all the tests tagged as smoke
+* ``--regex, -r``: This is a selection regex like what testr uses. It will run
+ any tests that match on re.match() with the regex
+* ``--smoke, -s``: Run all the tests tagged as smoke
-There are also the **--blacklist-file** and **--whitelist-file** options that
+There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
@@ -44,21 +44,21 @@
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
-You can also use the **--list-tests** option in conjunction with selection
+You can also use the ``--list-tests`` option in conjunction with selection
arguments to list which tests will be run.
-You can also use the **--load-list** option that lets you pass a filepath to
+You can also use the ``--load-list`` option that lets you pass a filepath to
tempest run with the file format being in a non-regex format, similar to the
-tests generated by the **--list-tests** option. You can specify target tests
+tests generated by the ``--list-tests`` option. You can specify target tests
by removing unnecessary tests from a list file which is generated from
-**--list-tests** option.
+``--list-tests`` option.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
-If you want to adjust the number of workers use the **--concurrency** option
-and if you want to run tests serially use **--serial/-t**
+If you want to adjust the number of workers, use the ``--concurrency`` option,
+and if you want to run tests serially, use ``--serial, -t``.
Running with Workspaces
-----------------------
@@ -82,7 +82,7 @@
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
-output to STDOUT use the **--subunit** flag
+output to STDOUT, use the ``--subunit`` flag.
Combining Runs
==============
@@ -90,7 +90,7 @@
There are certain situations in which you want to split a single run of tempest
across 2 executions of tempest run. (for example to run part of the tests
serially and others in parallel) To accomplish this but still treat the results
-as a single run you can leverage the **--combine** option which will append
+as a single run you can leverage the ``--combine`` option which will append
the current run's results with the previous runs.
"""
@@ -149,7 +149,7 @@
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
- testr_conf_file.write(file_contents)
+ testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index f9ebe20..f0ade7e 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -21,17 +21,14 @@
Runtime Arguments
-----------------
-**--subunit, -s**: (Optional) The path to the subunit file being parsed,
-defaults to stdin
-
-**--non-subunit-name, -n**: (Optional) The file_name that the logs are being
-stored in
-
-**--output-file, -o**: (Optional) The path where the JSON output will be
-written to. This contains more information than is present in stdout.
-
-**--ports, -p**: (Optional) The path to a JSON file describing the ports being
-used by different services
+* ``--subunit, -s``: (Optional) The path to the subunit file being parsed,
+ defaults to stdin
+* ``--non-subunit-name, -n``: (Optional) The file_name that the logs are being
+ stored in
+* ``--output-file, -o``: (Optional) The path where the JSON output will be
+ written to. This contains more information than is present in stdout.
+* ``--ports, -p``: (Optional) The path to a JSON file describing the ports
+ being used by different services
Usage
-----
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index fdf28d5..15af271 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -349,7 +349,6 @@
'image': 'glance',
'object_storage': 'swift',
'compute': 'nova',
- 'orchestration': 'heat',
'baremetal': 'ironic',
'identity': 'keystone',
}
diff --git a/tempest/cmd/workspace.py b/tempest/cmd/workspace.py
index 8166b4f..929a584 100644
--- a/tempest/cmd/workspace.py
+++ b/tempest/cmd/workspace.py
@@ -26,28 +26,28 @@
register
--------
-Registers a new tempest workspace via a given --name and --path
+Registers a new tempest workspace via a given ``--name`` and ``--path``
rename
------
-Renames a tempest workspace from --old-name to --new-name
+Renames a tempest workspace from ``--old-name`` to ``--new-name``
move
----
-Changes the path of a given tempest workspace --name to --path
+Changes the path of a given tempest workspace ``--name`` to ``--path``
remove
------
-Deletes the entry for a given tempest workspace --name
+Deletes the entry for a given tempest workspace ``--name``
---rmdir Deletes the given tempest workspace directory
+``--rmdir`` Deletes the given tempest workspace directory
General Options
===============
- **--workspace_path**: Allows the user to specify a different location for the
- workspace.yaml file containing the workspace definitions
- instead of ~/.tempest/workspace.yaml
+* ``--workspace_path``: Allows the user to specify a different location for the
+ workspace.yaml file containing the workspace definitions instead of
+ ``~/.tempest/workspace.yaml``
"""
import os
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 86fe3f5..638ad9b 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -229,7 +229,7 @@
clients.servers_client, server['id'], wait_until)
# Multiple validatable servers are not supported for now. Their
- # creation will fail with the condition above (l.58).
+ # creation will fail with the condition above.
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
_setup_validation_fip()
@@ -289,13 +289,21 @@
def create_websocket(url):
url = urlparse.urlparse(url)
- if url.scheme == 'https':
- client_socket = ssl.wrap_socket(socket.socket(socket.AF_INET,
- socket.SOCK_STREAM))
+ for res in socket.getaddrinfo(url.hostname, url.port,
+ socket.AF_UNSPEC, socket.SOCK_STREAM):
+ af, socktype, proto, _, sa = res
+ client_socket = socket.socket(af, socktype, proto)
+ if url.scheme == 'https':
+ client_socket = ssl.wrap_socket(client_socket)
+ client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ client_socket.connect(sa)
+ except socket.error:
+ client_socket.close()
+ continue
+ break
else:
- client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- client_socket.connect((url.hostname, url.port))
+ raise socket.error('WebSocket creation failed')
# Turn the Socket into a WebSocket to do the communication
return _WebSocket(client_socket, url)
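
The rewritten ``create_websocket`` helper above walks the ``socket.getaddrinfo()``
results so the console WebSocket can be reached over either IPv4 or IPv6. A
minimal standalone sketch of the same connect-any-address pattern (the names
here are illustrative, not part of the patch)::

    import socket
    import ssl

    def connect_any(hostname, port, use_tls=False):
        # Try every address family/protocol returned for the host until one
        # of them accepts a TCP connection.
        for af, socktype, proto, _, sa in socket.getaddrinfo(
                hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            sock = socket.socket(af, socktype, proto)
            if use_tls:
                # Mirrors the patched code; newer Python prefers SSLContext.
                sock = ssl.wrap_socket(sock)
            try:
                sock.connect(sa)
            except socket.error:
                sock.close()
                continue
            return sock
        raise socket.error('unable to connect to %s:%s' % (hostname, port))
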
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index da34975..75db155 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -86,7 +86,7 @@
('public_network_id', CONF.network.public_network_id),
('create_networks', (CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network)),
- ('resource_prefix', CONF.resources_prefix),
+ ('resource_prefix', 'tempest'),
('identity_admin_endpoint_type', endpoint_type)
]))
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 5a86caa..225a713 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -31,10 +31,9 @@
if attr == 'rand_name':
# NOTE(flwang): This is a proxy to generate a random name that
- # includes a random number and a prefix if one is configured in
- # CONF.resources_prefix
+ # includes a random number and a prefix 'tempest'
attr_obj = partial(lib_data_utils.rand_name,
- prefix=CONF.resources_prefix)
+ prefix='tempest')
else:
attr_obj = getattr(lib_data_utils, attr)
@@ -78,7 +77,7 @@
decorators.attr(type=list(args))(f)
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
service_list = get_service_list()
for service in args:
@@ -86,7 +85,7 @@
msg = 'Skipped because the %s service is not available' % (
service)
raise testtools.TestCase.skipException(msg)
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
return wrapper
return decorator
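
Dropping the explicit ``self`` parameter means the ``services`` decorator simply
forwards whatever arguments it receives, so it can wrap plain functions as well
as bound test methods. A hedged sketch of the pattern, with an
``AVAILABLE_SERVICES`` set standing in for ``get_service_list()``::

    import functools

    import testtools

    AVAILABLE_SERVICES = {'compute', 'image'}  # stand-in for get_service_list()

    def services(*args):
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*func_args, **func_kwargs):
                # No explicit 'self': arguments are forwarded untouched, so
                # the same decorator works for methods and plain functions.
                for service in args:
                    if service not in AVAILABLE_SERVICES:
                        raise testtools.TestCase.skipException(
                            'Skipped because the %s service is not available'
                            % service)
                return f(*func_args, **func_kwargs)
            return wrapper
        return decorator
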
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 10afee0..08e2a12 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,15 +179,13 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, statuses):
- """Waits for a volume resource to reach any of the specified statuses.
+def wait_for_volume_resource_status(client, resource_id, status):
+ """Waits for a volume resource to reach a given status.
This function is a common function for volume, snapshot and backup
resources. The function extracts the name of the desired resource from
the client class name of the resource.
"""
- if not isinstance(statuses, list):
- statuses = [statuses]
resource_name = re.findall(
r'(volume|group-snapshot|snapshot|backup|group)',
client.resource_type)[-1].replace('-', '_')
@@ -195,11 +193,11 @@
resource_status = show_resource(resource_id)[resource_name]['status']
start = int(time.time())
- while resource_status not in statuses:
+ while resource_status != status:
time.sleep(client.build_interval)
resource_status = show_resource(resource_id)[
'{}'.format(resource_name)]['status']
- if resource_status == 'error' and resource_status not in statuses:
+ if resource_status == 'error' and resource_status != status:
raise exceptions.VolumeResourceBuildErrorException(
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -208,11 +206,11 @@
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
- (resource_name, resource_id, statuses, resource_status,
+ (resource_name, resource_id, status, resource_status,
client.build_timeout))
raise lib_exc.TimeoutException(message)
LOG.info('%s %s reached %s after waiting for %f seconds',
- resource_name, resource_id, statuses, time.time() - start)
+ resource_name, resource_id, status, time.time() - start)
def wait_for_volume_retype(client, volume_id, new_volume_type):
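
With the signature reverted to a single status string, a typical call site
waits for one well-defined state. A usage sketch, assuming an already
configured ``volumes_client``::

    from tempest.common import waiters

    volume = volumes_client.create_volume(size=1)['volume']
    waiters.wait_for_volume_resource_status(
        volumes_client, volume['id'], 'available')
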
diff --git a/tempest/config.py b/tempest/config.py
index b14d4fd..231d005 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -65,9 +65,7 @@
deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
group='auth'),
cfg.DeprecatedOpt('allow_tenant_isolation',
- group='compute'),
- cfg.DeprecatedOpt('allow_tenant_isolation',
- group='orchestration')]),
+ group='compute')]),
cfg.ListOpt('tempest_roles',
help="Roles to assign to all users created by tempest",
default=[]),
@@ -229,17 +227,6 @@
"Empty list indicates all extensions are disabled. "
"To get the list of extensions run: "
"'openstack extension list --identity'"),
- # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed
- # in Newton and Ocata. This option can be removed after Mitaka is end of
- # life.
- cfg.BoolOpt('forbid_global_implied_dsr',
- default=False,
- help='Does the environment forbid global roles implying '
- 'domain specific ones?',
- deprecated_for_removal=True,
- deprecated_reason="This feature flag was introduced to "
- "support testing of old OpenStack versions, "
- "which are not supported anymore"),
cfg.BoolOpt('domain_specific_drivers',
default=False,
help='Are domain specific drivers enabled? '
@@ -312,9 +299,9 @@
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
- 'when shelved. This time should be the same as the time '
- 'of nova.conf, and some tests will run for as long as the '
- 'time.'),
+ 'when shelved. This configuration value should be same as '
+ '[nova.DEFAULT]->shelved_offload_time in nova.conf, and '
+ 'some tests will run for as long as the time.'),
cfg.IntOpt('min_compute_nodes',
default=1,
help=('The minimum number of compute nodes expected. This will '
@@ -484,6 +471,10 @@
default=False,
help='Does the test environment support in-place swapping of '
'volumes attached to a server instance?'),
+ cfg.BoolOpt('volume_backed_live_migration',
+ default=False,
+ help='Does the test environment support volume-backed live '
+ 'migration?'),
]
@@ -548,13 +539,6 @@
'are current one. In future, Tempest will '
'test v2 APIs only so this config option '
'will be removed.'),
- cfg.BoolOpt('deactivate_image',
- default=False,
- help="Is the deactivate-image feature enabled."
- " The feature has been integrated since Kilo.",
- deprecated_for_removal=True,
- deprecated_reason="All supported versions of OpenStack now "
- "support the 'deactivate_image' feature"),
]
network_group = cfg.OptGroup(name='network',
@@ -923,66 +907,6 @@
help="Execute discoverability tests"),
]
-orchestration_group = cfg.OptGroup(name='orchestration',
- title='Orchestration Service Options')
-
-OrchestrationGroup = [
- cfg.StrOpt('catalog_type',
- default='orchestration',
- help="Catalog type of the Orchestration service.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('region',
- default='',
- help="The orchestration region name to use. If empty, the "
- "value of identity.region is used instead. If no such "
- "region is found in the service catalog, the first found "
- "one is used.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('endpoint_type',
- default='publicURL',
- choices=['public', 'admin', 'internal',
- 'publicURL', 'adminURL', 'internalURL'],
- help="The endpoint type to use for the orchestration service.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('stack_owner_role', default='heat_stack_owner',
- help='Role required for users to be able to manage stacks',
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('build_interval',
- default=1,
- help="Time in seconds between build status checks.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('build_timeout',
- default=1200,
- help="Timeout in seconds to wait for a stack to build.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('instance_type',
- default='m1.micro',
- help="Instance type for tests. Needs to be big enough for a "
- "full OS plus the test workload",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.StrOpt('keypair_name',
- help="Name of existing keypair to launch servers with.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('max_template_size',
- default=524288,
- help="Value must match heat configuration of the same name.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
- cfg.IntOpt('max_resources_per_stack',
- default=1000,
- help="Value must match heat configuration of the same name.",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
-]
-
scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
@@ -1044,11 +968,6 @@
cfg.BoolOpt('nova',
default=True,
help="Whether or not nova is expected to be available"),
- cfg.BoolOpt('heat',
- default=False,
- help="Whether or not Heat is expected to be available",
- deprecated_for_removal=True,
- deprecated_reason='Heat support will be removed from Tempest'),
]
debug_group = cfg.OptGroup(name="debug",
@@ -1078,17 +997,6 @@
]
DefaultGroup = [
- cfg.StrOpt('resources_prefix',
- default='tempest',
- help="Prefix to be added when generating the name for "
- "test resources. It can be used to discover all "
- "resources associated with a specific test run when "
- "running tempest on a real-life cloud",
- deprecated_for_removal=True,
- deprecated_reason="It is enough to add 'tempest' as this "
- "prefix to ideintify resources which are "
- "created by Tempest and no projects set "
- "this option on OpenStack dev community."),
cfg.BoolOpt('pause_teardown',
default=False,
help="""Whether to pause a test in global teardown.
@@ -1116,7 +1024,6 @@
(volume_feature_group, VolumeFeaturesGroup),
(object_storage_group, ObjectStoreGroup),
(object_storage_feature_group, ObjectStoreFeaturesGroup),
- (orchestration_group, OrchestrationGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
@@ -1183,7 +1090,6 @@
self.object_storage = _CONF['object-storage']
self.object_storage_feature_enabled = _CONF[
'object-storage-feature-enabled']
- self.orchestration = _CONF.orchestration
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
self.debug = _CONF.debug
diff --git a/tempest/lib/api_schema/response/compute/v2_45/__init__.py b/tempest/lib/api_schema/response/compute/v2_45/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_45/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_45/images.py b/tempest/lib/api_schema/response/compute/v2_45/images.py
new file mode 100644
index 0000000..8a48f36
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_45/images.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# The 2.45 microversion removes the "location" header and adds "image_id"
+# to the response body.
+create_image = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'image_id': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['image_id']
+ }
+}
+
+# NOTE(mriedem): The compute proxy APIs for showing/listing and deleting
+# images were deprecated in microversion 2.35, and the compute proxy APIs for
+# working with image metadata were deprecated in microversion 2.39. Therefore,
+# client-side code shouldn't rely on those APIs in the compute images client
+# past those microversions and should instead use the Glance images client
+# directly.
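
For illustration only (Tempest itself validates responses through
``validate_response``), a body that conforms to the 2.45 ``create_image``
schema above can be checked directly with the ``jsonschema`` library; the UUID
below is a made-up example::

    import jsonschema

    body = {'image_id': '70a599e0-31e7-49b7-b260-868f441e862b'}
    jsonschema.validate(body, create_image['response_body'])

    # An empty pre-2.45 body would fail: 'image_id' is required and no other
    # properties are allowed.
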
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index f39ecbc..3a97801 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -101,12 +101,15 @@
:type project_domain_name: string
:param project_domain_id: Project's domain ID
:type project_domain_id: string
+ :param identity_api_version: Version of the Identity API
+ :type identity_api_version: string
"""
def __init__(self, username='', password='', tenant_name='', uri='',
cli_dir='', insecure=False, prefix='', user_domain_name=None,
user_domain_id=None, project_domain_name=None,
- project_domain_id=None, *args, **kwargs):
+ project_domain_id=None, identity_api_version=None, *args,
+ **kwargs):
"""Initialize a new CLIClient object."""
super(CLIClient, self).__init__()
self.cli_dir = cli_dir if cli_dir else '/usr/bin'
@@ -120,6 +123,7 @@
self.user_domain_id = user_domain_id
self.project_domain_name = project_domain_name
self.project_domain_id = project_domain_id
+ self.identity_api_version = identity_api_version
def nova(self, action, flags='', params='', fail_ok=False,
endpoint_type='publicURL', merge_stderr=False):
@@ -374,12 +378,15 @@
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
- creds = ('--os-username %s --os-tenant-name %s --os-password %s '
+ creds = ('--os-username %s --os-project-name %s --os-password %s '
'--os-auth-url %s' %
(self.username,
self.tenant_name,
self.password,
self.uri))
+ if self.identity_api_version:
+ creds += ' --os-identity-api-version %s' % (
+ self.identity_api_version)
if self.user_domain_name is not None:
creds += ' --os-user-domain-name %s' % self.user_domain_name
if self.user_domain_id is not None:
diff --git a/tempest/lib/common/fixed_network.py b/tempest/lib/common/fixed_network.py
index e2054a4..875a79d 100644
--- a/tempest/lib/common/fixed_network.py
+++ b/tempest/lib/common/fixed_network.py
@@ -38,7 +38,12 @@
raise exceptions.InvalidTestResource(type='network', name=name)
networks = compute_networks_client.list_networks()['networks']
- networks = [n for n in networks if n['label'] == name]
+ # NOTE(zhufl) compute networks_client uses 'label' as network name field,
+ # while neutron networks_client uses 'name' as network name field.
+ try:
+ networks = [n for n in networks if n['label'] == name]
+ except KeyError:
+ networks = [n for n in networks if n['name'] == name]
# Check that a network exists, else raise an InvalidConfigurationException
if len(networks) == 1:
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 83db513..fcdeb17 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -344,11 +344,11 @@
net_creds = cred_provider.TestResources(credential)
net_clients = clients.ServiceClients(credentials=credential,
identity_uri=self.identity_uri)
- compute_network_client = net_clients.compute.NetworksClient()
+ networks_client = net_clients.network.NetworksClient()
net_name = self.hash_dict['networks'].get(hash, None)
try:
network = fixed_network.get_network_from_name(
- net_name, compute_network_client)
+ net_name, networks_client)
except lib_exc.InvalidTestResource:
network = {}
net_creds.set_resources(network=network)
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index cd4092b..94fab00 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
import sys
import netaddr
@@ -25,13 +26,14 @@
def debug_ssh(function):
"""Decorator to generate extra debug info in case off SSH failure"""
+ @functools.wraps(function)
def wrapper(self, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except Exception as e:
caller = test_utils.find_test_caller() or "not found"
if not isinstance(e, tempest.lib.exceptions.SSHTimeout):
- message = ('Initializing SSH connection to %(ip)s failed. '
+ message = ('Executing command on %(ip)s failed. '
'Error: %(error)s' % {'ip': self.ip_address,
'error': e})
message = '(%s) %s' % (caller, message)
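
Adding ``functools.wraps`` to ``debug_ssh`` keeps the decorated method's name
and docstring intact, which matters for log messages and test-caller lookup. A
minimal illustration::

    import functools

    def logged(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            return function(*args, **kwargs)
        return wrapper

    @logged
    def exec_command(cmd):
        """Run a command on the remote host."""

    print(exec_command.__name__)  # 'exec_command', not 'wrapper'
    print(exec_command.__doc__)   # original docstring is preserved
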
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index c2e93ee..2a9f3a9 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -102,13 +102,13 @@
now = time.time()
begin_time = now
timeout = now + duration
+ func_name = getattr(func, '__name__', getattr(func.__class__, '__name__'))
while now < timeout:
if func(*args, **kwargs):
LOG.debug("Call %s returns true in %f seconds",
- getattr(func, '__name__'), time.time() - begin_time)
+ func_name, time.time() - begin_time)
return True
time.sleep(sleep_for)
now = time.time()
- LOG.debug("Call %s returns false in %f seconds",
- getattr(func, '__name__'), duration)
+ LOG.debug("Call %s returns false in %f seconds", func_name, duration)
return False
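
``functools.partial`` objects carry no ``__name__`` attribute, which is why the
debug message now falls back to the class name. A small demonstration of the
fallback used above::

    import functools

    def is_ready(flag):
        return flag

    check = functools.partial(is_ready, True)
    func_name = getattr(check, '__name__', getattr(check.__class__, '__name__'))
    print(func_name)  # 'partial' -- plain functions still report their own name
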
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index ef1003b..e99dd24 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -15,7 +15,6 @@
import functools
import uuid
-import debtcollector.removals
from oslo_log import log as logging
import six
import testtools
@@ -56,9 +55,9 @@
"""
def decorator(f):
@functools.wraps(f)
- def wrapper(self, *func_args, **func_kwargs):
+ def wrapper(*func_args, **func_kwargs):
try:
- return f(self, *func_args, **func_kwargs)
+ return f(*func_args, **func_kwargs)
except Exception as exc:
exc_status_code = getattr(exc, 'status_code', None)
if status_code is None or status_code == exc_status_code:
@@ -87,25 +86,6 @@
return decorator
-@debtcollector.removals.remove(removal_version='Queen')
-class skip_unless_attr(object):
- """Decorator to skip tests if a specified attr does not exists or False"""
- def __init__(self, attr, msg=None):
- self.attr = attr
- self.message = msg or ("Test case attribute %s not found "
- "or False") % attr
-
- def __call__(self, func):
- @functools.wraps(func)
- def _skipper(*args, **kw):
- """Wrapped skipper function."""
- testobj = args[0]
- if not getattr(testobj, self.attr, False):
- raise testtools.TestCase.skipException(self.message)
- func(*args, **kw)
- return _skipper
-
-
def attr(**kwargs):
"""A decorator which applies the testtools attr decorator
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 9b2e87e..13af890 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -96,7 +96,7 @@
class Conflict(ClientRestClientException):
status_code = 409
- message = "An object with that identifier already exists"
+ message = "Conflict with state of target resource"
class Gone(ClientRestClientException):
diff --git a/tempest/lib/services/compute/images_client.py b/tempest/lib/services/compute/images_client.py
index 86bea9e..0f4eb42 100644
--- a/tempest/lib/services/compute/images_client.py
+++ b/tempest/lib/services/compute/images_client.py
@@ -17,6 +17,7 @@
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import images as schema
+from tempest.lib.api_schema.response.compute.v2_45 import images as schemav245
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import base_compute_client
@@ -24,6 +25,10 @@
class ImagesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.44', 'schema': schema},
+ {'min': '2.45', 'max': None, 'schema': schemav245}]
+
def create_image(self, server_id, **kwargs):
"""Create an image of the original server.
@@ -36,7 +41,10 @@
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body)
- self.validate_response(schema.create_image, resp, body)
+ _schema = self.get_schema(self.schema_versions_info)
+ if body:
+ body = json.loads(body)
+ self.validate_response(_schema.create_image, resp, body)
return rest_client.ResponseBody(resp, body)
def list_images(self, detail=False, **params):
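
The ``schema_versions_info`` table maps a microversion range to the schema
module used for response validation; ``get_schema`` itself is inherited from
``BaseComputeClient``. A hedged sketch of how such a table can be resolved (not
the actual ``get_schema`` implementation)::

    def pick_schema(schema_versions_info, microversion):
        def as_tuple(version):
            return tuple(int(part) for part in version.split('.'))

        for entry in schema_versions_info:
            above_min = (entry['min'] is None or
                         as_tuple(entry['min']) <= as_tuple(microversion))
            below_max = (entry['max'] is None or
                         as_tuple(microversion) <= as_tuple(entry['max']))
            if above_min and below_max:
                return entry['schema']
        raise ValueError('no schema covers microversion %s' % microversion)

    # pick_schema(schema_versions_info, '2.45') returns the v2_45 module, so
    # create_image responses are validated against the new 'image_id' body.
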
diff --git a/tempest/lib/services/compute/quota_classes_client.py b/tempest/lib/services/compute/quota_classes_client.py
index 0fe9868..64e06f4 100644
--- a/tempest/lib/services/compute/quota_classes_client.py
+++ b/tempest/lib/services/compute/quota_classes_client.py
@@ -35,8 +35,9 @@
def update_quota_class_set(self, quota_class_id, **kwargs):
"""Update the quota class's limits for one or more resources.
- # NOTE: Current api-site doesn't contain this API description.
- # LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/compute/#create-or-update-quotas-for-quota-class
"""
post_body = json.dumps({'quota_class_set': kwargs})
diff --git a/tempest/lib/services/compute/quotas_client.py b/tempest/lib/services/compute/quotas_client.py
index daf4bc0..12df895 100644
--- a/tempest/lib/services/compute/quotas_client.py
+++ b/tempest/lib/services/compute/quotas_client.py
@@ -28,8 +28,8 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-compute-v2.1.html/#show-a-quota
- http://developer.openstack.org/api-ref-compute-v2.1.html/#show-the-detail-of-quota
+ https://developer.openstack.org/api-ref/compute/#show-a-quota
+ https://developer.openstack.org/api-ref/compute/#show-the-detail-of-quota
"""
params = {}
@@ -49,7 +49,10 @@
return rest_client.ResponseBody(resp, body)
def show_default_quota_set(self, tenant_id):
- """List the default quota set for a tenant."""
+ """List the default quota set for a tenant.
+
+ https://developer.openstack.org/api-ref/compute/#list-default-quotas-for-tenant
+ """
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
@@ -79,7 +82,10 @@
return rest_client.ResponseBody(resp, body)
def delete_quota_set(self, tenant_id):
- """Delete the tenant's quota set."""
+ """Delete the tenant's quota set.
+
+ https://developer.openstack.org/api-ref/compute/#revert-quotas-to-defaults
+ """
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
self.validate_response(schema.delete_quota, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 598d5a6..09bccab 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -126,7 +126,7 @@
For a full list of available parameters, please refer to the official
API reference:
- http://developer.openstack.org/api-ref-compute-v2.1.html#showServer
+ https://developer.openstack.org/api-ref/compute/#show-server-details
"""
resp, body = self.get("servers/%s" % server_id)
body = json.loads(body)
@@ -321,7 +321,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#create-or-replace-metadata-items
+ https://developer.openstack.org/api-ref/compute/#replace-metadata-items
"""
if no_metadata_field:
post_body = ""
@@ -338,7 +338,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#update-metadata-items
+ https://developer.openstack.org/api-ref/compute/#create-or-update-metadata-items
"""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % server_id,
@@ -609,9 +609,7 @@
For a full list of available parameters, please refer to the official
API reference:
- TODO (markus_z) The api-ref for that isn't yet available, update this
- here when the docs in Nova are updated. The old API is at
- http://developer.openstack.org/api-ref/compute/#get-serial-console-os-getserialconsole-action
+ https://developer.openstack.org/api-ref/compute/#create-remote-console
"""
param = {
'remote_console': {
@@ -722,7 +720,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action
+ https://developer.openstack.org/api-ref/compute/#get-vnc-console-os-getvncconsole-action-deprecated
"""
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
@@ -732,7 +730,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action
+ https://developer.openstack.org/api-ref/compute/#add-associate-fixed-ip-addfixedip-action-deprecated
"""
return self.action(server_id, 'addFixedIp', **kwargs)
@@ -741,7 +739,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action
+ https://developer.openstack.org/api-ref/compute/#remove-disassociate-fixed-ip-removefixedip-action-deprecated
"""
return self.action(server_id, 'removeFixedIp', **kwargs)
diff --git a/tempest/lib/services/network/metering_label_rules_client.py b/tempest/lib/services/network/metering_label_rules_client.py
index 36cf8e3..9542e8f 100644
--- a/tempest/lib/services/network/metering_label_rules_client.py
+++ b/tempest/lib/services/network/metering_label_rules_client.py
@@ -16,6 +16,12 @@
class MeteringLabelRulesClient(base.BaseNetworkClient):
def create_metering_label_rule(self, **kwargs):
+ """Create metering label rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#create-metering-label-rule
+ """
uri = '/metering/metering-label-rules'
post_data = {'metering_label_rule': kwargs}
return self.create_resource(uri, post_data)
@@ -29,5 +35,11 @@
return self.delete_resource(uri)
def list_metering_label_rules(self, **filters):
+ """List metering label rules.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#list-metering-label-rules
+ """
uri = '/metering/metering-label-rules'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/quotas_client.py b/tempest/lib/services/network/quotas_client.py
index 752b253..f23af88 100644
--- a/tempest/lib/services/network/quotas_client.py
+++ b/tempest/lib/services/network/quotas_client.py
@@ -18,6 +18,12 @@
class QuotasClient(base.BaseNetworkClient):
def update_quotas(self, tenant_id, **kwargs):
+ """Update quota for a project.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#update-quota-for-a-project
+ """
put_body = {'quota': kwargs}
uri = '/quotas/%s' % tenant_id
return self.update_resource(uri, put_body)
@@ -35,3 +41,8 @@
def list_quotas(self, **filters):
uri = '/quotas'
return self.list_resources(uri, **filters)
+
+ def show_default_quotas(self, tenant_id):
+ """List default quotas for a project."""
+ uri = '/quotas/%s/default' % tenant_id
+ return self.show_resource(uri)
diff --git a/tempest/lib/services/network/service_providers_client.py b/tempest/lib/services/network/service_providers_client.py
index 0ee9bc3..01313a0 100644
--- a/tempest/lib/services/network/service_providers_client.py
+++ b/tempest/lib/services/network/service_providers_client.py
@@ -16,6 +16,11 @@
class ServiceProvidersClient(base.BaseNetworkClient):
def list_service_providers(self, **filters):
- """Lists service providers."""
+ """Lists service providers.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#list-service-providers
+ """
uri = '/service-providers'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 67f01a6..6b097c1 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -34,7 +34,7 @@
Account Metadata can be created, updated or deleted based on
metadata header or value. For detailed info, please refer to the
official API reference:
- http://developer.openstack.org/api-ref/object-storage/?expanded=create-update-or-delete-account-metadata-detail
+ https://developer.openstack.org/api-ref/object-store/#create-update-or-delete-account-metadata
"""
headers = {}
if create_update_metadata:
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 2da8e24..430e0d4 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -97,7 +97,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://developer.openstack.org/api-ref/object-storage/?expanded=show-container-details-and-list-objects-detail
+ https://developer.openstack.org/api-ref/object-store/#show-container-details-and-list-objects
"""
url = str(container_name)
diff --git a/tempest/lib/services/volume/v2/quota_classes_client.py b/tempest/lib/services/volume/v2/quota_classes_client.py
index d40d2d9..733b1ac 100644
--- a/tempest/lib/services/volume/v2/quota_classes_client.py
+++ b/tempest/lib/services/volume/v2/quota_classes_client.py
@@ -26,8 +26,9 @@
def show_quota_class_set(self, quota_class_id):
"""List quotas for a quota class.
- TODO: Current api-site doesn't contain this API description.
- LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/index.html#show-quota-classes
"""
url = 'os-quota-class-sets/%s' % quota_class_id
resp, body = self.get(url)
@@ -38,8 +39,9 @@
def update_quota_class_set(self, quota_class_id, **kwargs):
"""Update quotas for a quota class.
- TODO: Current api-site doesn't contain this API description.
- LP: https://bugs.launchpad.net/nova/+bug/1602400
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v2/index.html#update-quota-classes
"""
url = 'os-quota-class-sets/%s' % quota_class_id
put_body = json.dumps({'quota_class_set': kwargs})
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 97bac48..1b47201 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -75,3 +75,67 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def update_group_type(self, group_type_id, **kwargs):
+ """Updates a group type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-group-type
+ """
+ post_body = json.dumps({'group_type': kwargs})
+ resp, body = self.put('group_types/%s' % group_type_id, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def create_or_update_group_type_specs(self, group_type_id, group_specs):
+ """Creates new group specs or updates existing group specs.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#create-group-specs-for-a-group-type
+ """
+ url = "group_types/%s/group_specs" % group_type_id
+ post_body = json.dumps({'group_specs': group_specs})
+ resp, body = self.post(url, post_body)
+ body = json.loads(body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_group_type_specs(self, group_type_id):
+ """Lists all group specs for a given group type."""
+ url = 'group_types/%s/group_specs' % group_type_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_group_type_specs_item(self, group_type_id, spec_id):
+ """Shows specified item of group specs for a given group type."""
+ url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_group_type_specs_item(self, group_type_id, spec_id, spec):
+ """Updates specified item of group specs for a given group type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-one-specific-group-spec-for-a-group-type
+ """
+ url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
+ put_body = json.dumps(spec)
+ resp, body = self.put(url, put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_group_type_specs_item(self, group_type_id, spec_id):
+ """Deletes specified item of group specs for a given group type."""
+ resp, body = self.delete("group_types/%s/group_specs/%s" % (
+ group_type_id, spec_id))
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
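
A usage sketch for the new group-spec calls, assuming an authenticated
``group_types_client`` and an existing group type; the spec key and values are
illustrative::

    specs = {'consistent_group_snapshot_enabled': '<is> True'}
    group_types_client.create_or_update_group_type_specs('GROUP_TYPE_ID', specs)

    group_types_client.list_group_type_specs('GROUP_TYPE_ID')
    group_types_client.update_group_type_specs_item(
        'GROUP_TYPE_ID', 'consistent_group_snapshot_enabled',
        {'consistent_group_snapshot_enabled': '<is> False'})
    group_types_client.delete_group_type_specs_item(
        'GROUP_TYPE_ID', 'consistent_group_snapshot_enabled')
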
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index ad300c2..efcd139 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -15,6 +15,7 @@
Any scenario test should have a real-life use case. An example would be:
- "As operator I want to start with a blank environment":
+
1. upload a glance image
2. deploy a vm from it
3. ssh to the guest
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 7c404ad..e4ab11c 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -195,6 +195,8 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'VERIFY_RESIZE')
self.servers_client.confirm_resize_server(server['id'])
+ server = self.servers_client.show_server(server['id'])['server']
+ self.assertEqual(resize_flavor, server['flavor']['id'])
self._wait_server_status_and_check_network_connectivity(
server, keypair, floating_ip)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 64ea8f6..beb039c 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -208,7 +208,19 @@
# boot instance from EBS image
instance = self.create_server(image_id=image['id'])
- # just ensure that instance booted
+
+ # Verify the server was created from the image
+ created_volume = instance['os-extended-volumes:volumes_attached']
+ self.assertNotEmpty(created_volume, "No volume attachment found.")
+ created_volume_info = self.volumes_client.show_volume(
+ created_volume[0]['id'])['volume']
+ self.assertEqual(instance['id'],
+ created_volume_info['attachments'][0]['server_id'])
+ self.assertEqual(created_volume[0]['id'],
+ created_volume_info['attachments'][0]['volume_id'])
+ self.assertEqual(
+ volume_origin['volume_image_metadata']['image_id'],
+ created_volume_info['volume_image_metadata']['image_id'])
# delete instance
self._delete_server(instance)
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index cd10bbd..ff7996a 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -38,6 +38,11 @@
credentials = ['primary', 'admin']
@classmethod
+ def setup_clients(cls):
+ super(TestVolumeMigrateRetypeAttached, cls).setup_clients()
+ cls.admin_volumes_client = cls.os_admin.volumes_v2_client
+
+ @classmethod
def skip_checks(cls):
super(TestVolumeMigrateRetypeAttached, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
@@ -76,8 +81,10 @@
return source_body['name'], dest_body['name']
def _volume_retype_with_migration(self, volume_id, new_volume_type):
+ # NOTE: The 'on-demand' migration requires admin operation, so
+ # admin_volumes_client() should be used here.
migration_policy = 'on-demand'
- self.volumes_client.retype_volume(
+ self.admin_volumes_client.retype_volume(
volume_id, new_type=new_volume_type,
migration_policy=migration_policy)
waiters.wait_for_volume_retype(self.volumes_client,
diff --git a/tempest/test.py b/tempest/test.py
index 9da85d5..27e0165 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -836,7 +836,7 @@
manager = cls.get_client_manager()
# Make sure cred_provider exists and get a network client
- networks_client = manager.compute_networks_client
+ networks_client = manager.networks_client
cred_provider = cls._get_credentials_provider()
# In case of nova network, isolated tenants are not able to list the
# network configured in fixed_network_name, even if they can use it
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index 8bf4c5b..fd9af08 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -153,17 +153,14 @@
def test_generate_resources_no_admin(self):
cfg.CONF.set_default('swift', False, group='service_available')
- cfg.CONF.set_default('heat', False, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=False)
resource_types = [k for k, _ in resources]
- # No admin, no heat, no swift, expect two credentials only
+ # No admin, no swift, expect two credentials only
self.assertEqual(2, len(resources))
# Ensure create_user was invoked twice (two distinct users)
self.assertEqual(2, self.user_create_fixture.mock.call_count)
@@ -180,17 +177,14 @@
def test_generate_resources_admin(self):
cfg.CONF.set_default('swift', False, group='service_available')
- cfg.CONF.set_default('heat', False, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
- # Admin, no heat, no swift, expect three credentials only
+ # Admin, no swift, expect three credentials only
self.assertEqual(3, len(resources))
# Ensure create_user was invoked 3 times (3 distinct users)
self.assertEqual(3, self.user_create_fixture.mock.call_count)
@@ -205,28 +199,24 @@
self.assertIsNotNone(resource[1].router)
self.assertIsNotNone(resource[1].subnet)
- def test_generate_resources_swift_heat_admin(self):
+ def test_generate_resources_swift_admin(self):
cfg.CONF.set_default('swift', True, group='service_available')
- cfg.CONF.set_default('heat', True, group='service_available')
cfg.CONF.set_default('operator_role', 'fake_operator',
group='object-storage')
cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
group='object-storage')
- cfg.CONF.set_default('stack_owner_role', 'fake_owner',
- group='orchestration')
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
# all options on, expect six credentials
self.assertEqual(6, len(resources))
# Ensure create_user was invoked 6 times (6 distinct users)
- self.assertEqual(6, self.user_create_fixture.mock.call_count)
+ self.assertEqual(5, self.user_create_fixture.mock.call_count)
self.assertIn('primary', resource_types)
self.assertIn('alt', resource_types)
self.assertIn('admin', resource_types)
self.assertIn(['fake_operator'], resource_types)
self.assertIn(['fake_reseller'], resource_types)
- self.assertIn(['fake_owner', 'fake_operator'], resource_types)
for resource in resources:
self.assertIsNotNone(resource[1].network)
self.assertIsNotNone(resource[1].router)
@@ -258,7 +248,6 @@
self.opts)
self.mock_resource_creation()
cfg.CONF.set_default('swift', True, group='service_available')
- cfg.CONF.set_default('heat', True, group='service_available')
self.resources = account_generator.generate_resources(
self.cred_provider, admin=True)
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index ee63684..4a2fff4 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -39,11 +39,12 @@
self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
group='identity')
self.conf.set_default('neutron', True, group='service_available')
- self.conf.set_default('heat', True, group='service_available')
- if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
- os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
+ lock_path = str(os.environ.get('OS_TEST_LOCK_PATH',
+ os.environ.get('TMPDIR', '/tmp')))
+ if not os.path.exists(lock_path):
+ os.mkdir(lock_path)
lockutils.set_defaults(
- lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),
+ lock_path=lock_path,
)
self.conf.set_default('auth_version', 'v2', group='identity')
for config_option in ['username', 'password', 'project_name']:
diff --git a/tempest/tests/lib/cli/test_execute.py b/tempest/tests/lib/cli/test_execute.py
index c276386..c069af5 100644
--- a/tempest/tests/lib/cli/test_execute.py
+++ b/tempest/tests/lib/cli/test_execute.py
@@ -125,3 +125,27 @@
mock_execute.call_args[0][2])
self.assertNotIn('--os-project-domain-name',
mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_default_api_version(self, mock_execute):
+ cli = cli_base.CLIClient()
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertNotIn('--os-identity-api-version ',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_empty_api_version(self, mock_execute):
+ cli = cli_base.CLIClient(identity_api_version='')
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertNotIn('--os-identity-api-version ',
+ mock_execute.call_args[0][2])
+
+ @mock.patch.object(cli_base, 'execute')
+ def test_execute_with_explicit_api_version(self, mock_execute):
+ cli = cli_base.CLIClient(identity_api_version='0.0')
+ cli.openstack('action')
+ self.assertEqual(mock_execute.call_count, 1)
+ self.assertIn('--os-identity-api-version 0.0 ',
+ mock_execute.call_args[0][2])
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 9b10159..25df2a7 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -339,7 +339,7 @@
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
**self.fixed_params)
- with mock.patch('tempest.lib.services.compute.networks_client.'
+ with mock.patch('tempest.lib.services.network.networks_client.'
'NetworksClient.list_networks',
return_value={'networks': [{'name': 'network-2',
'id': 'fake-id',
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
new file mode 100644
index 0000000..078f4b0
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -0,0 +1,242 @@
+# Copyright 2017 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import networks_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestNetworksClient(base.BaseServiceTest):
+
+ FAKE_NETWORKS = {
+ "networks": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "mtu": 0,
+ "name": "net1",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": True,
+ "description": ""
+ },
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "mtu": 0,
+ "name": "net2",
+ "port_security_enabled": True,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "08eae331-0402-425a-923c-34f7cfe39c1b"
+ ],
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": False,
+ "description": ""
+ }
+ ]
+ }
+
+ FAKE_NETWORK_ID = "d32019d3-bc6e-4319-9c1d-6722fc136a22"
+
+ FAKE_NETWORK1 = {
+ "name": "net1",
+ "admin_state_up": True,
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e"
+ }
+
+ FAKE_NETWORK2 = {
+ "name": "net2",
+ "admin_state_up": True,
+ "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819"
+ }
+
+ FAKE_NETWORKS_REQ = {
+ "networks": [
+ FAKE_NETWORK1,
+ FAKE_NETWORK2
+ ]
+ }
+
+ FAKE_DHCP_AGENT_NETWORK_ID = "80515c45-651f-4f9a-b82b-2ca8a7301a8d"
+
+ FAKE_DHCP_AGENTS = {
+ "agents": [
+ {
+ "binary": "neutron-dhcp-agent",
+ "description": None,
+ "admin_state_up": True,
+ "heartbeat_timestamp": "2017-06-22 18:29:50",
+ "availability_zone": "nova",
+ "alive": True,
+ "topic": "dhcp_agent",
+ "host": "osboxes",
+ "agent_type": "DHCP agent",
+ "resource_versions": {},
+ "created_at": "2017-06-19 21:39:51",
+ "started_at": "2017-06-19 21:39:51",
+ "id": "b6cfb7a1-6ac4-4980-993c-9d295d37062e",
+ "configurations": {
+ "subnets": 2,
+ "dhcp_lease_duration": 86400,
+ "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
+ "networks": 1,
+ "log_agent_heartbeats": False,
+ "ports": 3
+ }
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestNetworksClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.networks_client = networks_client.NetworksClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_list_networks(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.list_networks,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_NETWORKS,
+ bytes_body,
+ 200)
+
+ def _test_create_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.create_network,
+ "tempest.lib.common.rest_client.RestClient.post",
+ {"network": self.FAKE_NETWORKS["networks"][0]},
+ bytes_body,
+ 201,
+ **self.FAKE_NETWORK1)
+
+ def _test_create_bulk_networks(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.create_bulk_networks,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_NETWORKS,
+ bytes_body,
+ 201,
+ networks=self.FAKE_NETWORKS_REQ)
+
+ def _test_show_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.show_network,
+ "tempest.lib.common.rest_client.RestClient.get",
+ {"network": self.FAKE_NETWORKS["networks"][0]},
+ bytes_body,
+ 200,
+ network_id=self.FAKE_NETWORK_ID)
+
+ def _test_update_network(self, bytes_body=False):
+ update_kwargs = {
+ "name": "sample_network_5_updated",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e"
+ }
+
+ resp_body = {
+ "network": copy.deepcopy(
+ self.FAKE_NETWORKS["networks"][0]
+ )
+ }
+ resp_body["network"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.networks_client.update_network,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ network_id=self.FAKE_NETWORK_ID,
+ **update_kwargs)
+
+ def _test_list_dhcp_agents_on_hosting_network(self, bytes_body=False):
+ self.check_service_client_function(
+ self.networks_client.list_dhcp_agents_on_hosting_network,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_DHCP_AGENTS,
+ bytes_body,
+ 200,
+ network_id=self.FAKE_DHCP_AGENT_NETWORK_ID)
+
+ def test_delete_network(self):
+ self.check_service_client_function(
+ self.networks_client.delete_network,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ network_id=self.FAKE_NETWORK_ID)
+
+ def test_list_networks_with_str_body(self):
+ self._test_list_networks()
+
+ def test_list_networks_with_bytes_body(self):
+ self._test_list_networks(bytes_body=True)
+
+ def test_create_network_with_str_body(self):
+ self._test_create_network()
+
+ def test_create_network_with_bytes_body(self):
+ self._test_create_network(bytes_body=True)
+
+ def test_create_bulk_network_with_str_body(self):
+ self._test_create_bulk_networks()
+
+ def test_create_bulk_network_with_bytes_body(self):
+ self._test_create_bulk_networks(bytes_body=True)
+
+ def test_show_network_with_str_body(self):
+ self._test_show_network()
+
+ def test_show_network_with_bytes_body(self):
+ self._test_show_network(bytes_body=True)
+
+ def test_update_network_with_str_body(self):
+ self._test_update_network()
+
+ def test_update_network_with_bytes_body(self):
+ self._test_update_network(bytes_body=True)
+
+ def test_list_dhcp_agents_on_hosting_network_with_str_body(self):
+ self._test_list_dhcp_agents_on_hosting_network()
+
+ def test_list_dhcp_agents_on_hosting_network_with_bytes_body(self):
+ self._test_list_dhcp_agents_on_hosting_network(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_quotas_client.py b/tempest/tests/lib/services/network/test_quotas_client.py
index e76bc9c..5a09911 100644
--- a/tempest/tests/lib/services/network/test_quotas_client.py
+++ b/tempest/tests/lib/services/network/test_quotas_client.py
@@ -38,6 +38,20 @@
]
}
+ FAKE_PROJECT_QUOTAS = {
+ "quota": {
+ "floatingip": 50,
+ "network": 10,
+ "port": 50,
+ "rbac_policy": -1,
+ "router": 10,
+ "security_group": 10,
+ "security_group_rule": 100,
+ "subnet": 10,
+ "subnetpool": -1
+ }
+ }
+
FAKE_QUOTA_TENANT_ID = "bab7d5c60cd041a0a36f7c4b6e1dd978"
def setUp(self):
@@ -58,7 +72,16 @@
self.check_service_client_function(
self.quotas_client.show_quotas,
"tempest.lib.common.rest_client.RestClient.get",
- {"quota": self.FAKE_QUOTAS["quotas"][0]},
+ self.FAKE_PROJECT_QUOTAS,
+ bytes_body,
+ 200,
+ tenant_id=self.FAKE_QUOTA_TENANT_ID)
+
+ def _test_show_default_quotas(self, bytes_body=False):
+ self.check_service_client_function(
+ self.quotas_client.show_default_quotas,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_PROJECT_QUOTAS,
bytes_body,
200,
tenant_id=self.FAKE_QUOTA_TENANT_ID)
@@ -67,7 +90,7 @@
self.check_service_client_function(
self.quotas_client.update_quotas,
"tempest.lib.common.rest_client.RestClient.put",
- {"quota": self.FAKE_QUOTAS["quotas"][0]},
+ self.FAKE_PROJECT_QUOTAS,
bytes_body,
200,
tenant_id=self.FAKE_QUOTA_TENANT_ID)
@@ -92,6 +115,12 @@
def test_show_quotas_with_bytes_body(self):
self._test_show_quotas(bytes_body=True)
+ def test_show_default_quotas_with_str_body(self):
+ self._test_show_default_quotas()
+
+ def test_show_default_quotas_with_bytes_body(self):
+ self._test_show_default_quotas(bytes_body=True)
+
def test_update_quotas_with_str_body(self):
self._test_update_quotas()
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index 0f456a2..c60cc36 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.lib.services.volume.v3 import group_types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -67,6 +69,28 @@
]
}
+ FAKE_CREATE_GROUP_TYPE_SPECS = {
+ "group_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ }
+
+ FAKE_LIST_GROUP_TYPE_SPECS = {
+ "group_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+ }
+
+ FAKE_SHOW_GROUP_TYPE_SPECS_ITEM = {
+ "key1": "value1"
+ }
+
+ FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM = {
+ "key2": "value2-updated"
+ }
+
def setUp(self):
super(TestGroupTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -97,6 +121,57 @@
self.FAKE_LIST_GROUP_TYPES,
bytes_body)
+ def _test_update_group_types(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_INFO_GROUP_TYPE)
+ resp_body['group_type'].pop('created_at')
+
+ self.check_service_client_function(
+ self.client.update_group_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ name='updated-group-type-name')
+
+ def _test_create_or_update_group_type_specs(self, bytes_body=False):
+ group_specs = self.FAKE_CREATE_GROUP_TYPE_SPECS['group_specs']
+ self.check_service_client_function(
+ self.client.create_or_update_group_type_specs,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_GROUP_TYPE_SPECS,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ group_specs=group_specs,
+ status=202)
+
+ def _test_list_group_type_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_group_type_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_GROUP_TYPE_SPECS,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+
+ def _test_show_group_type_specs_item(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_GROUP_TYPE_SPECS_ITEM,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ spec_id="key1")
+
+ def _test_update_group_type_specs_item(self, bytes_body=False):
+ spec = self.FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM
+ self.check_service_client_function(
+ self.client.update_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_GROUP_TYPE_SPECS_ITEM,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ spec_id="key2",
+ spec=spec)
+
def test_create_group_type_with_str_body(self):
self._test_create_group_type()
@@ -122,3 +197,42 @@
def test_list_group_types_with_bytes_body(self):
self._test_list_group_types(bytes_body=True)
+
+ def test_update_group_types_with_str_body(self):
+ self._test_update_group_types()
+
+ def test_update_group_types_with_bytes_body(self):
+ self._test_update_group_types(bytes_body=True)
+
+ def test_create_or_update_group_type_specs_with_str_body(self):
+ self._test_create_or_update_group_type_specs()
+
+ def test_create_or_update_group_type_specs_with_bytes_body(self):
+ self._test_create_or_update_group_type_specs(bytes_body=True)
+
+ def test_list_group_type_specs_with_str_body(self):
+ self._test_list_group_type_specs()
+
+ def test_list_group_type_specs_with_bytes_body(self):
+ self._test_list_group_type_specs(bytes_body=True)
+
+ def test_show_group_type_specs_item_with_str_body(self):
+ self._test_show_group_type_specs_item()
+
+ def test_show_group_type_specs_item_with_bytes_body(self):
+ self._test_show_group_type_specs_item(bytes_body=True)
+
+ def test_update_group_type_specs_item_with_str_body(self):
+ self._test_update_group_type_specs_item()
+
+ def test_update_group_type_specs_item_with_bytes_body(self):
+ self._test_update_group_type_specs_item(bytes_body=True)
+
+ def test_delete_group_type_specs_item(self):
+ self.check_service_client_function(
+ self.client.delete_group_type_specs_item,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ group_type_id='0e58433f-d108-4bf3-a22c-34e6b71ef86b',
+ spec_id='key1',
+ status=202)
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index bbebcd3..ed0eea3 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -125,35 +125,6 @@
self.assertRaises(ValueError, self._test_helper, _id)
-class TestSkipUnlessAttrDecorator(base.TestCase):
- def _test_skip_unless_attr(self, attr, expected_to_skip=True):
- class TestFoo(test.BaseTestCase):
- expected_attr = not expected_to_skip
-
- @decorators.skip_unless_attr(attr)
- def test_foo(self):
- pass
-
- t = TestFoo('test_foo')
- if expected_to_skip:
- self.assertRaises(testtools.TestCase.skipException,
- t.test_foo)
- else:
- try:
- t.test_foo()
- except Exception:
- raise testtools.TestCase.failureException()
-
- def test_skip_attr_does_not_exist(self):
- self._test_skip_unless_attr('unexpected_attr')
-
- def test_skip_attr_false(self):
- self._test_skip_unless_attr('expected_attr')
-
- def test_no_skip_for_attr_exist_and_true(self):
- self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
-
-
class TestRelatedBugDecorator(base.TestCase):
def test_relatedbug_when_no_exception(self):
f = mock.Mock()
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
index 011bc9b..2b5a947 100644
--- a/tempest/tests/test_base_test.py
+++ b/tempest/tests/test_base_test.py
@@ -41,7 +41,7 @@
def test_get_tenant_network(self, mock_gtn, mock_gprov, mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
test.BaseTestCase.get_tenant_network()
@@ -85,7 +85,7 @@
mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
test.BaseTestCase.get_tenant_network(credentials_type='alt')
@@ -102,7 +102,7 @@
mock_gcm):
net_client = mock.Mock()
mock_prov = mock.Mock()
- mock_gcm.return_value.compute_networks_client = net_client
+ mock_gcm.return_value.networks_client = net_client
mock_gprov.return_value = mock_prov
creds = ['foo_type', 'role1']
diff --git a/test-requirements.txt b/test-requirements.txt
index 37644d0..e33f207 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,11 +2,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
-# needed for doc build
-sphinx>=1.6.2 # BSD
-openstackdocstheme>=1.17.0 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
mock>=2.0.0 # BSD
coverage!=4.4,>=4.0 # Apache-2.0
-oslotest>=1.10.0 # Apache-2.0
+oslotest>=3.2.0 # Apache-2.0
flake8-import-order==0.11 # LGPLv3
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
deleted file mode 100755
index 1f2b88b..0000000
--- a/tools/find_stack_traces.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import gzip
-import pprint
-import re
-import sys
-
-import six
-import six.moves.urllib.request as urlreq
-
-
-pp = pprint.PrettyPrinter()
-
-NOVA_TIMESTAMP = r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d"
-
-NOVA_REGEX = r"(?P<timestamp>%s) (?P<pid>\d+ )?(?P<level>(ERROR|TRACE)) " \
- "(?P<module>[\w\.]+) (?P<msg>.*)" % (NOVA_TIMESTAMP)
-
-
-class StackTrace(object):
- timestamp = None
- pid = None
- level = ""
- module = ""
- msg = ""
-
- def __init__(self, timestamp=None, pid=None, level="", module="",
- msg=""):
- self.timestamp = timestamp
- self.pid = pid
- self.level = level
- self.module = module
- self.msg = msg
-
- def append(self, msg):
- self.msg = self.msg + msg
-
- def is_same(self, data):
- return (data['timestamp'] == self.timestamp and
- data['level'] == self.level)
-
- def not_none(self):
- return self.timestamp is not None
-
- def __str__(self):
- buff = "<%s %s %s>\n" % (self.timestamp, self.level, self.module)
- for line in self.msg.splitlines():
- buff = buff + line + "\n"
- return buff
-
-
-def hunt_for_stacktrace(url):
- """Return TRACE or ERROR lines out of logs."""
- req = urlreq.Request(url)
- req.add_header('Accept-Encoding', 'gzip')
- page = urlreq.urlopen(req)
- buf = six.StringIO(page.read())
- f = gzip.GzipFile(fileobj=buf)
- content = f.read()
-
- traces = []
- trace = StackTrace()
- for line in content.splitlines():
- m = re.match(NOVA_REGEX, line)
- if m:
- data = m.groupdict()
- if trace.not_none() and trace.is_same(data):
- trace.append(data['msg'] + "\n")
- else:
- trace = StackTrace(
- timestamp=data.get('timestamp'),
- pid=data.get('pid'),
- level=data.get('level'),
- module=data.get('module'),
- msg=data.get('msg'))
-
- else:
- if trace.not_none():
- traces.append(trace)
- trace = StackTrace()
-
- # once more at the end to pick up any stragglers
- if trace.not_none():
- traces.append(trace)
-
- return traces
-
-
-def log_url(url, log):
- return "%s/%s" % (url, log)
-
-
-def collect_logs(url):
- page = urlreq.urlopen(url)
- content = page.read()
- logs = re.findall('(screen-[\w-]+\.txt\.gz)</a>', content)
- return logs
-
-
-def usage():
- print("""
-Usage: find_stack_traces.py <logurl>
-
-Hunts for stack traces in a devstack run. Must provide it a base log url
-from a tempest devstack run. Should start with http and end with /logs/.
-
-Returns a report listing stack traces out of the various files where
-they are found.
-""")
- sys.exit(0)
-
-
-def print_stats(items, fname, verbose=False):
- errors = len([x for x in items if x.level == "ERROR"])
- traces = len([x for x in items if x.level == "TRACE"])
- print("%d ERRORS found in %s" % (errors, fname))
- print("%d TRACES found in %s" % (traces, fname))
-
- if verbose:
- for item in items:
- print(item)
- print("\n\n")
-
-
-def main():
- if len(sys.argv) == 2:
- url = sys.argv[1]
- loglist = collect_logs(url)
-
- # probably wrong base url
- if not loglist:
- usage()
-
- for log in loglist:
- logurl = log_url(url, log)
- traces = hunt_for_stacktrace(logurl)
-
- if traces:
- print_stats(traces, log, verbose=True)
-
- else:
- usage()
-
-if __name__ == '__main__':
- main()
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 99df0d1..dd05438 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -28,12 +28,12 @@
try:
# For Python 3.0 and later
- from urllib.error import HTTPError as HTTPError
+ from urllib.error import HTTPError
import urllib.request as urllib
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as urllib
- from urllib2 import HTTPError as HTTPError
+ from urllib2 import HTTPError
url = 'https://review.openstack.org/projects/'
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
deleted file mode 100755
index 43468e4..0000000
--- a/tools/tox_install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Client constraint file contains this client version pin that is in conflict
-# with installing the client from source. We should remove the version pin in
-# the constraints file before applying it for from-source installation.
-
-CONSTRAINTS_FILE=$1
-shift 1
-
-set -e
-
-# NOTE(tonyb): Place this in the tox enviroment's log dir so it will get
-# published to logs.openstack.org for easy debugging.
-localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
-
-if [[ $CONSTRAINTS_FILE != http* ]]; then
- CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE
-fi
-# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
-curl $CONSTRAINTS_FILE --insecure --progress-bar --output $localfile
-
-pip install -c$localfile openstack-requirements
-
-# This is the main purpose of the script: Allow local installation of
-# the current repo. It is listed in constraints file and thus any
-# install will be constrained and we need to unconstrain it.
-edit-constraints $localfile -- $CLIENT_NAME
-
-pip install -c$localfile -U $*
-exit $?
diff --git a/tox.ini b/tox.ini
index 21696eb..892b6f4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,9 +8,8 @@
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
- BRANCH_NAME=master
- CLIENT_NAME=tempest
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
[testenv]
@@ -18,14 +17,12 @@
VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE=1
PYTHONWARNINGS=default::DeprecationWarning
- BRANCH_NAME=master
- CLIENT_NAME=tempest
passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
-install_command =
- {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
whitelist_externals = *
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
@@ -36,7 +33,17 @@
commands = oslo-config-generator --config-file tempest/cmd/config-generator.tempest.conf
[testenv:cover]
-commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
+setenv =
+ {[testenv]setenv}
+ PYTHON=coverage run --source tempest --parallel-mode
+commands =
+ coverage erase
+ find . -type f -name "*.pyc" -delete
+ stestr --test-path ./tempest/tests run {posargs}
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
+ coverage report
[testenv:all]
envdir = .tox/tempest
@@ -50,17 +57,6 @@
find . -type f -name "*.pyc" -delete
tempest run --regex {posargs}
-[testenv:ostestr]
-sitepackages = {[tempestenv]sitepackages}
-# 'all' includes slow tests
-setenv =
- {[tempestenv]setenv}
- OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
-deps = {[tempestenv]deps}
-commands =
- find . -type f -name "*.pyc" -delete
- ostestr {posargs}
-
[testenv:all-plugin]
sitepackages = True
# 'all' includes slow tests
@@ -127,6 +123,10 @@
tempest run --serial --regex '\[.*\bsmoke\b.*\]' {posargs}
[testenv:venv]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands = {posargs}
[testenv:venv-tempest]
@@ -137,9 +137,14 @@
commands = {posargs}
[testenv:docs]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands =
- rm -rf doc/build
- python setup.py build_sphinx {posargs}
+ rm -rf doc/build
+ sphinx-build -b html doc/source doc/build/html
+whitelist_externals = rm
[testenv:pep8]
commands =
@@ -165,9 +170,15 @@
import-order-style = pep8
[testenv:releasenotes]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
commands =
- rm -rf releasenotes/build
- sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+ rm -rf releasenotes/build
+ sphinx-build -a -E -W -d releasenotes/build/doctrees \
+ -b html releasenotes/source releasenotes/build/html
+whitelist_externals = rm
[testenv:pip-check-reqs]
# Do not install test-requirements as that will pollute the virtualenv for