Merge "Adding test case for migration policy 'never'"
diff --git a/.zuul.yaml b/.zuul.yaml
index 3fee6ff..8ab3028 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,7 +1,13 @@
- job:
name: devstack-tempest
parent: devstack
- description: Base Tempest job.
+ nodeset: openstack-single-node
+ description: |
+ Base Tempest job.
+
+ This Tempest job provides the base for both the single and multi-node
+ test setup. To run a multi-node test inherit from devstack-tempest and
+ set the nodeset to a multi-node one.
required-projects:
- openstack/tempest
timeout: 7200
@@ -10,6 +16,11 @@
vars:
devstack_services:
tempest: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute:
+ min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
test_results_stage_name: 'test_results'
zuul_copy_output:
'{{ devstack_base_dir }}/tempest/etc/tempest.conf': 'logs'
@@ -81,6 +92,36 @@
# without Swift, c-bak cannot run (in the Gate at least)
c-bak: false
+- job:
+ name: tempest-multinode-full
+ parent: devstack-tempest
+ nodeset: openstack-two-node
+ # Until the devstack changes are backported, only run this on master
+ branches:
+ - master
+ description: |
+ Base multinode integration test with Neutron networking and py27.
+ Former names for this job were:
+ * neutron-tempest-multinode-full
+ * legacy-tempest-dsvm-neutron-multinode-full
+ * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+ This job includes two nodes, controller / tempest plus a subnode, but
+ it can be used with different topologies, as long as a controller node
+ and a tempest one exist.
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE: False
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+ group-vars:
+ peers:
+ devstack_localrc:
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+
- nodeset:
name: openstack-bionic-node
nodes:
@@ -284,6 +325,16 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - tempest-multinode-full:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
- tempest-tox-plugin-sanity-check
- tempest-scenario-multinode-lvm-multibackend:
voting: false
@@ -296,21 +347,21 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - nova-cells-v1:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
gate:
jobs:
- nova-multiattach
experimental:
jobs:
- - nova-cells-v1:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- nova-live-migration:
irrelevant-files:
- ^(test-|)requirements.txt$
diff --git a/HACKING.rst b/HACKING.rst
index 1c084f8..bb55ac5 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -419,34 +419,3 @@
tested is considered stable and adheres to the OpenStack API stability
guidelines. If an API is still considered experimental or in development then
it should not be tested by Tempest until it is considered stable.
-
-Stable Support Policy
----------------------
-
-Since the `Extended Maintenance policy`_ for stable branches was adopted
-OpenStack projects will keep stable branches around after a "stable" or
-"maintained" period for a phase of indeterminate length called "Extended
-Maintenance". Prior to this resolution Tempest supported all stable branches
-which were supported upstream. This policy does not scale under the new model
-as Tempest would be responsible for gating proposed changes against an ever
-increasing number of branches. Therefore due to resource constraints, Tempest
-will only provide support for branches in the "Maintained" phase from the
-documented `Support Phases`_. When a branch moves from the *Maintained* to the
-*Extended Maintenance* phase, Tempest will tag the removal of support for that
-branch as it has in the past when a branch goes end of life.
-
-The expectation for *Extended Maintenance* phase branches is that they will continue
-running Tempest during that phase of support. Since the REST APIs are stable
-interfaces across release boundaries, branches in these phases should run
-Tempest from master as long as possible. But, because we won't be actively
-testing branches in these phases, it's possible that we'll introduce changes to
-Tempest on master which will break support on *Extended Maintenance* phase
-branches. When this happens the expectation for those branches is to either
-switch to running Tempest from a tag with support for the branch, or blacklist
-a newly introduced test (if that is the cause of the issue). Tempest will not
-be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
-project, to support that branch.
-
-.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
-.. _Support Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f562850..fecf98a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -80,6 +80,14 @@
library
+Support Policy
+--------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_support_policy
+
Indices and tables
==================
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index f2c51b3..ea868ae 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -372,7 +372,11 @@
* `2.60`_
- .. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54
+ .. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-queens
+
+ * `2.63`_
+
+ .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
* Volume
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
new file mode 100644
index 0000000..87e3ad1
--- /dev/null
+++ b/doc/source/stable_branch_support_policy.rst
@@ -0,0 +1,30 @@
+Stable Branch Support Policy
+============================
+
+Since the `Extended Maintenance policy`_ for stable branches was adopted
+OpenStack projects will keep stable branches around after a "stable" or
+"maintained" period for a phase of indeterminate length called "Extended
+Maintenance". Prior to this resolution Tempest supported all stable branches
+which were supported upstream. This policy does not scale under the new model
+as Tempest would be responsible for gating proposed changes against an ever
+increasing number of branches. Therefore due to resource constraints, Tempest
+will only provide support for branches in the "Maintained" phase from the
+documented `Support Phases`_. When a branch moves from the *Maintained* to the
+*Extended Maintenance* phase, Tempest will tag the removal of support for that
+branch as it has in the past when a branch goes end of life.
+
+The expectation for *Extended Maintenance* phase branches is that they will continue
+running Tempest during that phase of support. Since the REST APIs are stable
+interfaces across release boundaries, branches in these phases should run
+Tempest from master as long as possible. But, because we won't be actively
+testing branches in these phases, it's possible that we'll introduce changes to
+Tempest on master which will break support on *Extended Maintenance* phase
+branches. When this happens the expectation for those branches is to either
+switch to running Tempest from a tag with support for the branch, or blacklist
+a newly introduced test (if that is the cause of the issue). Tempest will not
+be creating stable branches to support *Extended Maintenance* phase branches, as
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
+project, to support that branch.
+
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Support Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index a684984..01155a8 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -3,7 +3,7 @@
# avoid zuul retrying on legitimate failures.
- hosts: all
roles:
- - run-devstack
+ - orchestrate-devstack
# We run tests only on one node, regardless how many nodes are in the system
- hosts: tempest
diff --git a/playbooks/post-tempest.yaml b/playbooks/post-tempest.yaml
index 4dde2c9..6e0bcad 100644
--- a/playbooks/post-tempest.yaml
+++ b/playbooks/post-tempest.yaml
@@ -1,4 +1,4 @@
-- hosts: all
+- hosts: tempest
become: true
roles:
- role: fetch-subunit-output
diff --git a/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml b/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
index fc061bc..9ae46fd 100644
--- a/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
+++ b/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
@@ -6,4 +6,4 @@
The lack of these additional content-type will cause defcore test
to fail for OpenStack public cloud which uses tomcat module in the
api gateway. The additions are ``application/json;charset=utf-8``,
- ``text/html;charset=utf-8``,``text/plain;charset=utf-8``
\ No newline at end of file
+ ``text/html;charset=utf-8``, ``text/plain;charset=utf-8``
diff --git a/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml b/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
index 73900ca..e9c3694 100644
--- a/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
+++ b/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
@@ -1,6 +1,6 @@
---
features:
- |
- Adds a new cli option to tempest run, --combine, which is used to indicate
- you want the subunit stream output combined with the previous run's in
- the testr repository
+ Adds a new cli option to tempest run, ``--combine``, which is used
+ to indicate you want the subunit stream output combined with the
+ previous run's in the testr repository
diff --git a/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
index 9d7102f..5b4a96d 100644
--- a/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
+++ b/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
@@ -1,5 +1,5 @@
---
upgrade:
- |
- The deprecated config option 'allow_port_security_disabled' from compute_feature_enabled
- group has been removed.
+ The deprecated config option ``allow_port_security_disabled`` from
+ ``compute_feature_enabled`` group has been removed.
diff --git a/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
index b4e4dd1..c8b0ca8 100644
--- a/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
+++ b/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
@@ -8,4 +8,4 @@
- ``compute.ssh_user`` (available as ``validation.image_ssh_user``)
- ``scenario.ssh_user`` (available as ``validation.image_ssh_user``)
- ``compute.network_for_ssh`` (available as ``validation.network_for_ssh``)
- - ``compute.ping_timeout `` (available as ``validation.ping_timeout``)
+ - ``compute.ping_timeout`` (available as ``validation.ping_timeout``)
diff --git a/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
new file mode 100644
index 0000000..03d0ae8
--- /dev/null
+++ b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add ``enable_service``, ``disable_service``, ``disable_log_reason``,
+ ``freeze_host`` and ``thaw_host`` API endpoints to volume v3
+ ``services_client``.
diff --git a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
index 403bbad..145e7dd 100644
--- a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
+++ b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
@@ -1,7 +1,7 @@
---
features:
- |
- Adds a new cli option to tempest run, --load-list <list-file>
+ Adds a new cli option to tempest run, ``--load-list <list-file>``
to specify target tests to run from a list-file. The list-file
- supports the output format of the tempest run --list-tests
+ supports the output format of the tempest run ``--list-tests``
command.
diff --git a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
index b54ee8b..19d47d1 100644
--- a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
+++ b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
@@ -1,11 +1,9 @@
---
-prelude: >
- When using OVS HW offload feature we need to create
- Neutron port with a certain capability. This is done
- by creating Neutron port with binding profile. To be
- able to test this we need profile capability support
- in Tempest as well.
features:
- A new config option 'port_profile' is added to the section
'network' to specify capabilities of the port.
- By default this is set to {}.
+ By default this is set to {}. When using OVS HW offload
+ feature we need to create Neutron port with a certain
+ capability. This is done by creating Neutron port with
+ binding profile. To be able to test this we need profile
+ capability support in Tempest as well.
diff --git a/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
index 8fdf4f0..abd2610 100644
--- a/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
+++ b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
@@ -1,4 +1,5 @@
---
features:
- |
- Add --save-state option to allow saving state of cloud before tempest run.
+ Add ``--save-state`` option to allow saving state of cloud before
+ tempest run.
diff --git a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
index 406e282..6c44ba0 100644
--- a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
+++ b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
@@ -3,5 +3,5 @@
- |
Add extension API show quota details to network quotas_client library.
This feature enables the possibility to show a quota set for a specified
- project that includes the quota’s used, limit and reserved counts for per
- resource
+ project that includes the quota's used, limit and reserved counts per
+ resource.
diff --git a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
index e3443c8..2a0a86c 100644
--- a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
+++ b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
@@ -2,8 +2,8 @@
other:
- |
The CLIClient class, when it calls a command line client, uses
- --os-project-name instead of --os-tenant-name for the project, and
- passes --os-identity-api-version (default empty).
+ ``--os-project-name`` instead of ``--os-tenant-name`` for the
+ project, and passes ``--os-identity-api-version`` (default empty).
All CLI clients still available in supported releases of OpenStack
- which are wrapped by the cmd_with_auth() method support those
+ which are wrapped by the ``cmd_with_auth()`` method support those
switches.
diff --git a/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml b/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
index 265853d..0f9a0f6 100644
--- a/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
+++ b/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
@@ -1,8 +1,8 @@
---
features:
- |
- Adds a new CLI arg in tempest run, --black-regex, which is a regex to
- exclude the tests that match it.
+ Adds a new CLI arg in tempest run, ``--black-regex``, which is a
+ regex to exclude the tests that match it.
fixes:
- |
Fixes tempest run CLI args mutually exclusive behavior which should not
diff --git a/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml b/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
index ec21098..c69ed50 100644
--- a/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
+++ b/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
@@ -1,5 +1,5 @@
---
features:
- |
- Added tempest workspace remove --name <workspace_name> --rmdir
+ Added tempest workspace remove ``--name <workspace_name> --rmdir``
feature to delete the workspace directory as well as entry.
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index b377c0c..d0c1973 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -108,6 +108,35 @@
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
+ cls._check_depends_on_nova_network()
+
+ @classmethod
+ def _check_depends_on_nova_network(cls):
+ # Since nova-network APIs were removed from Nova in the Rocky release,
+ # determine, based on the max version from the version document, if
+ # the compute API is >Queens and if so, skip tests that rely on
+ # nova-network.
+ if not getattr(cls, 'depends_on_nova_network', False):
+ return
+ versions = cls.versions_client.list_versions()['versions']
+ # Find the v2.1 version which will tell us our max version for the
+ # compute API we're testing against.
+ for version in versions:
+ if version['id'] == 'v2.1':
+ max_version = api_version_request.APIVersionRequest(
+ version['version'])
+ break
+ else:
+ LOG.warning(
+ 'Unable to determine max v2.1 compute API version: %s',
+ versions)
+ return
+
+ # The max compute API version in Queens is 2.60 so we cap
+ # at that version.
+ queens = api_version_request.APIVersionRequest('2.60')
+ if max_version > queens:
+ raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 3a85a86..ff8ed61 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -13,7 +13,6 @@
# under the License.
import json
-import time
from oslo_log import log as logging
@@ -80,9 +79,11 @@
return True
cmd = 'curl %s' % md_url
md_json = ssh_client.exec_command(cmd)
- verify_method(md_json)
- return True
-
+ return verify_method(md_json)
+ # NOTE(gmann) Keep refreshing the metadata info until the metadata
+ # cache is refreshed. For safer side, we will go with wait loop of
+ # build_interval till build_timeout. verify_method() above will return
+ # True if all metadata verification is done as expected.
if not test_utils.call_until_true(get_and_verify_metadata,
CONF.compute.build_timeout,
CONF.compute.build_interval):
@@ -122,16 +123,20 @@
if d['mac'] == self.net_2_200_mac:
self.assertEqual(d['tags'], ['net-2-200'])
- # A hypervisor may present multiple paths to a tagged disk, so
- # there may be duplicated tags in the metadata, use set() to
- # remove duplicated tags.
- # Some hypervisors might report devices with no tags as well.
- found_devices = [d['tags'][0] for d in md_dict['devices']
- if d.get('tags')]
+ # A hypervisor may present multiple paths to a tagged disk, so
+ # there may be duplicated tags in the metadata, use set() to
+ # remove duplicated tags.
+ # Some hypervisors might report devices with no tags as well.
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
+ try:
self.assertEqual(set(found_devices), set(['port-1', 'port-2',
'net-1', 'net-2-100',
'net-2-200', 'boot',
'other']))
+ return True
+ except Exception:
+ return False
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
@@ -302,12 +307,21 @@
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
- found_devices = [d['tags'][0] for d in md_dict['devices']]
- self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
+ try:
+ self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ return True
+ except Exception:
+ return False
def verify_empty_devices(self, md_json):
md_dict = json.loads(md_json)
- self.assertEmpty(md_dict['devices'])
+ try:
+ self.assertEmpty(md_dict['devices'])
+ return True
+ except Exception:
+ return False
@decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
@utils.services('network', 'volume', 'image')
@@ -354,10 +368,6 @@
server=server,
servers_client=self.servers_client)
- # NOTE(artom) The newly attached tagged nic won't appear in the
- # metadata until the cache is refreshed. We wait 16 seconds since the
- # default cache expiry is 15 seconds.
- time.sleep(16)
self.verify_metadata_from_api(server, ssh_client,
self.verify_device_metadata)
@@ -370,7 +380,5 @@
waiters.wait_for_interface_detach(self.interfaces_client,
server['id'],
interface['port_id'])
- # NOTE(artom) More waiting until metadata cache is refreshed.
- time.sleep(16)
self.verify_metadata_from_api(server, ssh_client,
self.verify_empty_devices)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index ec7b505..9fc5af0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -393,7 +393,7 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup1).response
+ name=backup1)
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
@@ -409,10 +409,10 @@
"deleted during rotation.", oldest_backup)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image1_id = resp['image_id']
else:
- image1_id = data_utils.parse_image_id(resp['location'])
+ image1_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
@@ -422,12 +422,12 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup2).response
+ name=backup2)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image2_id = resp['image_id']
else:
- image2_id = data_utils.parse_image_id(resp['location'])
+ image2_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
@@ -465,12 +465,12 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup3).response
+ name=backup3)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image3_id = resp['image_id']
else:
- image3_id = data_utils.parse_image_id(resp['location'])
+ image3_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image3_id)
# the first back up should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 543fa1c..56d973e 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -209,3 +209,34 @@
server['id'], 'ACTIVE')
# Checking list details API response schema
self.servers_client.list_servers(detail=True)
+
+
+class ServerShowV263Test(base.BaseV2ComputeTest):
+ min_microversion = '2.63'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
+ def test_show_update_rebuild_list_server(self):
+ trusted_certs = ['test-cert-1', 'test-cert-2']
+ server = self.create_test_server(
+ trusted_image_certificates=trusted_certs,
+ wait_until='ACTIVE')
+
+ # Check show API response schema
+ self.servers_client.show_server(server['id'])['server']
+
+ # Check update API response schema
+ self.servers_client.update_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # Check rebuild API response schema
+ self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # Check list details API response schema
+ params = {'trusted_image_certificates': trusted_certs}
+ servers = self.servers_client.list_servers(
+ detail=True, **params)['servers']
+ self.assertNotEmpty(servers)
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 90f04ff..5fb1711 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -25,8 +25,12 @@
CONF = config.CONF
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# This test needs a network and a subnet
@@ -50,8 +54,6 @@
# for a given server_id
if CONF.service_available.neutron:
- # TODO(mriedem): After a microversion implements the API for
- # neutron, a 400 should be a failure for nova-network and neutron.
with testtools.ExpectedException(exceptions.BadRequest):
self.client.list_virtual_interfaces(self.server['id'])
else:
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index c4e2400..ec4d7a8 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -20,8 +20,12 @@
from tempest.lib import exceptions as lib_exc
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# For this test no network resources are needed
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 69cac33..62ced19 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -384,12 +384,23 @@
rules = self.roles_client.list_all_role_inference_rules()[
'role_inferences']
+
+ # NOTE(jaosorior): With the work related to the define-default-roles
+ # blueprint, we now have 'admin', 'member' and 'reader' by default. So
+ # we filter every other implied role to only take into account the ones
+ # relates to this test.
+ relevant_roles = (self.roles[0]['id'], self.roles[1]['id'],
+ self.roles[2]['id'], self.role['id'])
+
+ def is_implied_role_relevant(rule):
+ return any(r for r in rule['implies'] if r['id'] in relevant_roles)
+
+ relevant_rules = filter(is_implied_role_relevant, rules)
# Sort the rules by the number of inferences, since there should be 1
# inference between "roles[2]" and "role" and 2 inferences for
# "roles[0]": between "roles[1]" and "roles[2]".
- sorted_rules = sorted(rules, key=lambda r: len(r['implies']))
+ sorted_rules = sorted(relevant_rules, key=lambda r: len(r['implies']))
- # Check that 2 sets of rules are returned.
self.assertEqual(2, len(sorted_rules))
# Check that only 1 inference rule exists between "roles[2]" and "role"
self.assertEqual(1, len(sorted_rules[0]['implies']))
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index 5aa337c..be0c4c6 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -22,7 +22,6 @@
class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
- force_tenant_isolation = True
credentials = ['primary', 'alt', 'admin']
@classmethod
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index ef4a23a..b4bb88e 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -15,6 +15,7 @@
from tempest.api.network import base
from tempest.common import utils
+from tempest.common.utils import data_utils
from tempest.common.utils import net_utils
from tempest import config
from tempest.lib import decorators
@@ -158,11 +159,21 @@
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
- network2 = self.create_network()
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network2 = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network2['id'])
subnet2 = self.create_subnet(network2)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])
router2 = self.create_router(external_network_id=self.ext_net_id)
+ self.addCleanup(self.routers_client.delete_router, router2['id'])
self.create_router_interface(router2['id'], subnet2['id'])
+ self.addCleanup(self.routers_client.remove_router_interface,
+ router2['id'], subnet_id=subnet2['id'])
port_other_router = self.create_port(network2)
+ self.addCleanup(self.ports_client.delete_port,
+ port_other_router['id'])
# Associate floating IP to the other port on another router
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5168423..246a5c3 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -52,6 +52,21 @@
ports_list = body['ports']
self.assertFalse(port_id in [n['id'] for n in ports_list])
+ def _create_subnet(self, network, gateway='',
+ cidr=None, mask_bits=None, **kwargs):
+ subnet = self.create_subnet(network, gateway, cidr, mask_bits)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ return subnet
+
+ def _create_network(self, network_name=None, **kwargs):
+ network_name = network_name or data_utils.rand_name(
+ self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name, **kwargs)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ return network
+
@decorators.attr(type='smoke')
@decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
@@ -73,7 +88,7 @@
@decorators.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
def test_create_bulk_port(self):
network1 = self.network
- network2 = self.create_network()
+ network2 = self._create_network()
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
body = self.ports_client.create_bulk_ports(ports=port_list)
@@ -90,7 +105,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
- network = self.create_network()
+ network = self._create_network()
net_id = network['id']
address = self.cidr
address.prefixlen = self.mask_bits
@@ -100,10 +115,9 @@
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
'end': str(address[-2])}]}
- subnet = self.create_subnet(network, cidr=address,
- mask_bits=address.prefixlen,
- **allocation_pools)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ self._create_subnet(network, cidr=address,
+ mask_bits=address.prefixlen,
+ **allocation_pools)
body = self.ports_client.create_port(network_id=net_id)
self.addCleanup(self.ports_client.delete_port, body['port']['id'])
port = body['port']
@@ -153,9 +167,8 @@
@decorators.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
def test_port_list_filter_by_ip(self):
# Create network and subnet
- network = self.create_network()
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
# Create two ports
port_1 = self.ports_client.create_port(network_id=network['id'])
self.addCleanup(self.ports_client.delete_port, port_1['port']['id'])
@@ -187,10 +200,8 @@
'ip-substring-filtering extension not enabled.')
def test_port_list_filter_by_ip_substr(self):
# Create network and subnet
- network = self.create_network()
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
-
+ network = self._create_network()
+ subnet = self._create_subnet(network)
# Get two IP addresses
ip_address_1 = None
ip_address_2 = None
@@ -261,10 +272,8 @@
@decorators.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
def test_port_list_filter_by_router_id(self):
# Create a router
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
router = self.create_router()
self.addCleanup(self.routers_client.delete_router, router['id'])
port = self.ports_client.create_port(network_id=network['id'])
@@ -294,12 +303,9 @@
@decorators.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
# Create a network with two subnets
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet_1 = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_1['id'])
- subnet_2 = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_2['id'])
+ network = self._create_network()
+ subnet_1 = self._create_subnet(network)
+ subnet_2 = self._create_subnet(network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
@@ -323,8 +329,7 @@
self.assertEqual(2, len(port['fixed_ips']))
def _update_port_with_security_groups(self, security_groups_names):
- subnet_1 = self.create_subnet(self.network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet_1['id'])
+ subnet_1 = self._create_subnet(self.network)
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
security_groups_list = list()
@@ -413,10 +418,8 @@
utils.is_extension_enabled('security-group', 'network'),
'security-group extension not enabled.')
def test_create_port_with_no_securitygroups(self):
- network = self.create_network()
- self.addCleanup(self.networks_client.delete_network, network['id'])
- subnet = self.create_subnet(network)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ network = self._create_network()
+ self._create_subnet(network)
port = self.create_port(network, security_groups=[])
self.addCleanup(self.ports_client.delete_port, port['id'])
self.assertIsNotNone(port['security_groups'])
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index abbb779..3ff12e4 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -39,6 +39,11 @@
self.addCleanup(self._cleanup_router, router)
return router
+ def _create_subnet(self, network, gateway='', cidr=None):
+ subnet = self.create_subnet(network, gateway, cidr)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ return subnet
+
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
interface = self.routers_client.add_router_interface(
router_id, subnet_id=subnet_id)
@@ -65,12 +70,12 @@
'The public_network_id option must be specified.')
def test_create_show_list_update_delete_router(self):
# Create a router
- name = data_utils.rand_name(self.__class__.__name__ + '-router')
+ router_name = data_utils.rand_name(self.__class__.__name__ + '-router')
router = self._create_router(
- name=name,
+ name=router_name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
- self.assertEqual(router['name'], name)
+ self.assertEqual(router['name'], router_name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
router['external_gateway_info']['network_id'],
@@ -97,8 +102,12 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
def test_add_remove_router_interface_with_subnet_id(self):
- network = self.create_network()
- subnet = self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ subnet = self._create_subnet(network)
router = self._create_router()
# Add router interface with subnet id
interface = self.routers_client.add_router_interface(
@@ -116,8 +125,12 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
def test_add_remove_router_interface_with_port_id(self):
- network = self.create_network()
- self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ self._create_subnet(network)
router = self._create_router()
port_body = self.ports_client.create_port(
network_id=network['id'])
@@ -183,13 +196,18 @@
# Update router extra route, second ip of the range is
# used as next hop
for i in range(routes_num):
- network = self.create_network()
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
subnet = self.create_subnet(network, cidr=next_cidr)
next_cidr = next_cidr.next()
# Add router interface with subnet id
self.create_router_interface(router['id'], subnet['id'])
-
+ self.addCleanup(self._remove_router_interface_with_subnet_id,
+ router['id'], subnet['id'])
cidr = netaddr.IPNetwork(subnet['cidr'])
next_hop = str(cidr[2])
destination = str(subnet['cidr'])
@@ -242,13 +260,18 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
def test_add_multiple_router_interfaces(self):
- network01 = self.create_network(
- network_name=data_utils.rand_name('router-network01-'))
- network02 = self.create_network(
- network_name=data_utils.rand_name('router-network02-'))
- subnet01 = self.create_subnet(network01)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network01 = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network01['id'])
+ network02 = self.networks_client.create_network(
+ name=data_utils.rand_name(self.__class__.__name__))['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network02['id'])
+ subnet01 = self._create_subnet(network01)
sub02_cidr = self.cidr.next()
- subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
+ subnet02 = self._create_subnet(network02, cidr=sub02_cidr)
router = self._create_router()
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
@@ -261,8 +284,12 @@
@decorators.idempotent_id('96522edf-b4b5-45d9-8443-fa11c26e6eff')
def test_router_interface_port_update_with_fixed_ip(self):
- network = self.create_network()
- subnet = self.create_subnet(network)
+ network_name = data_utils.rand_name(self.__class__.__name__)
+ network = self.networks_client.create_network(
+ name=network_name)['network']
+ self.addCleanup(self.networks_client.delete_network,
+ network['id'])
+ subnet = self._create_subnet(network)
router = self._create_router()
fixed_ip = [{'subnet_id': subnet['id']}]
interface = self._add_router_interface_with_subnet_id(router['id'],
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 45f4caa..731a055 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -157,6 +157,57 @@
waiters.wait_for_volume_resource_status(
self.groups_client, grp2['id'], 'available')
+ @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
+ @decorators.related_bug('1739031')
+ def test_delete_group_snapshots_following_updated_volumes(self):
+ volume_type = self.create_volume_type()
+
+ group_type = self.create_group_type()
+
+ # Create a volume group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Note: When dealing with consistency groups, all volumes must
+ # reside on the same backend. Adding volumes to the same consistency
+ # group from multiple backends isn't supported. In order to ensure all
+ # volumes share the same backend, all volumes must share the same
+ # volume-type and group id.
+ volume_list = []
+ for _ in range(2):
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+ volume_list.append(volume['id'])
+
+ for vol in volume_list:
+ self.groups_client.update_group(grp['id'],
+ remove_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ self.groups_client.update_group(grp['id'],
+ add_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ # Verify the created volumes are associated with consistency group
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ grp_vols = [v for v in vols if v['group_id'] == grp['id']]
+ self.assertEqual(2, len(grp_vols))
+
+ # Create a snapshot group
+ group_snapshot = self._create_group_snapshot(group_id=grp['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+
+ for snap in snapshots:
+ if snap['volume_id'] in volume_list:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Delete a snapshot group
+ self._delete_group_snapshot(group_snapshot)
+
class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
_api_version = 3
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c0891e4..c5c70d2 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -29,6 +29,10 @@
if not CONF.volume_feature_enabled.multi_backend:
raise cls.skipException("Cinder multi-backend feature disabled")
+ if len(set(CONF.volume.backend_names)) < 2:
+ raise cls.skipException("Requires at least two different "
+ "backend names")
+
@classmethod
def resource_setup(cls):
super(VolumeMultiBackendTest, cls).resource_setup()
@@ -41,9 +45,6 @@
# Volume/Type creation (uses volume_backend_name)
# It is not allowed to create the same backend name twice
- if len(backend_names) < 2:
- raise cls.skipException("Requires at least two different "
- "backend names")
for backend_name in backend_names:
# Volume/Type creation (uses backend_name)
cls._create_type_and_volume(backend_name, False)
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 6f9daa8..e546bff 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -13,10 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.common import waiters
-from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
@@ -25,8 +23,6 @@
class BaseVolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
-
credentials = ['primary', 'alt', 'admin']
def setUp(self):
@@ -45,6 +41,19 @@
cls.transfer_client = cls.os_primary.volume_transfers_v2_client
cls.alt_transfer_client = cls.os_alt.volume_transfers_v2_client
+ @classmethod
+ def resource_setup(cls):
+ super(BaseVolumeQuotasAdminTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas so that some tests may use it
+ # to restore the quotas to their original values after they are
+ # done.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+
@decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0')
def test_list_quotas(self):
quotas = (self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
@@ -62,8 +71,6 @@
@decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6')
def test_update_all_quota_resources_for_tenant(self):
# Admin can update all the resource quota limits for a tenant
- default_quota_set = self.admin_quotas_client.show_default_quota_set(
- self.demo_tenant_id)['quota_set']
new_quota_set = {'gigabytes': 1009,
'volumes': 11,
'snapshots': 11,
@@ -76,11 +83,9 @@
self.demo_tenant_id,
**new_quota_set)['quota_set']
- cleanup_quota_set = dict(
- (k, v) for k, v in default_quota_set.items()
- if k in QUOTA_KEYS)
self.addCleanup(self.admin_quotas_client.update_quota_set,
- self.demo_tenant_id, **cleanup_quota_set)
+ self.demo_tenant_id, **self.cleanup_quota_set)
+
# test that the specific values we set are actually in
# the final result. There is nothing here that ensures there
# would be no other values in there.
@@ -96,6 +101,25 @@
for usage_key in QUOTA_USAGE_KEYS:
self.assertIn(usage_key, quota_usage[key])
+ @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
+ def test_delete_quota(self):
+ # Admin can delete the resource quota set for a project
+
+ self.addCleanup(self.admin_quotas_client.update_quota_set,
+ self.demo_tenant_id, **self.cleanup_quota_set)
+
+ quota_set_default = self.admin_quotas_client.show_default_quota_set(
+ self.demo_tenant_id)['quota_set']
+ volume_default = quota_set_default['volumes']
+
+ self.admin_quotas_client.update_quota_set(
+ self.demo_tenant_id, volumes=(volume_default + 5))
+
+ self.admin_quotas_client.delete_quota_set(self.demo_tenant_id)
+ quota_set_new = (self.admin_quotas_client.show_quota_set(
+ self.demo_tenant_id)['quota_set'])
+ self.assertEqual(volume_default, quota_set_new['volumes'])
+
@decorators.idempotent_id('ae8b6091-48ad-4bfa-a188-bbf5cc02115f')
def test_quota_usage(self):
quota_usage = self.admin_quotas_client.show_quota_set(
@@ -115,28 +139,6 @@
volume["size"],
new_quota_usage['gigabytes']['in_use'])
- @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
- def test_delete_quota(self):
- # Admin can delete the resource quota set for a project
- project_name = data_utils.rand_name('quota_tenant')
- description = data_utils.rand_name('desc_')
- project = identity.identity_utils(self.os_admin).create_project(
- project_name, description=description)
- project_id = project['id']
- self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
- project_id)
- quota_set_default = self.admin_quotas_client.show_default_quota_set(
- project_id)['quota_set']
- volume_default = quota_set_default['volumes']
-
- self.admin_quotas_client.update_quota_set(
- project_id, volumes=(volume_default + 5))
-
- self.admin_quotas_client.delete_quota_set(project_id)
- quota_set_new = (self.admin_quotas_client.show_quota_set(project_id)
- ['quota_set'])
- self.assertEqual(volume_default, quota_set_new['volumes'])
-
@decorators.idempotent_id('8911036f-9d54-4720-80cc-a1c9796a8805')
def test_quota_usage_after_volume_transfer(self):
# Create a volume for transfer
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index d127b5f..f50f336 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -19,10 +19,11 @@
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
class BaseVolumeQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
@classmethod
def setup_credentials(cls):
@@ -32,11 +33,23 @@
@classmethod
def resource_setup(cls):
super(BaseVolumeQuotasNegativeTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas, then set up the cleanup method
+ # to restore the quotas to their original values after the tests
+ # from this class are done. This is needed just in case Tempest is
+ # configured to use pre-provisioned projects/user accounts.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+ cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
+ cls.demo_tenant_id,
+ **cls.cleanup_quota_set)
+
cls.shared_quota_set = {'gigabytes': 2 * CONF.volume.volume_size,
'volumes': 1}
- # NOTE(gfidente): no need to restore original quota set
- # after the tests as they only work with dynamic credentials.
cls.admin_quotas_client.update_quota_set(
cls.demo_tenant_id,
**cls.shared_quota_set)
diff --git a/tempest/api/volume/admin/test_volume_services_negative.py b/tempest/api/volume/admin/test_volume_services_negative.py
new file mode 100644
index 0000000..6f3dbc6
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services_negative.py
@@ -0,0 +1,65 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(VolumeServicesNegativeTest, cls).resource_setup()
+ cls.services = cls.admin_volume_services_client.list_services()[
+ 'services']
+ cls.host = cls.services[0]['host']
+ cls.binary = cls.services[0]['binary']
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
+ def test_enable_service_with_invalid_host(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.enable_service,
+ host='invalid_host', binary=self.binary)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
+ def test_disable_service_with_invalid_binary(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.disable_service,
+ host=self.host, binary='invalid_binary')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
+ def test_disable_log_reason_with_no_reason(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.disable_log_reason,
+ host=self.host, binary=self.binary,
+ disabled_reason=None)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
+ def test_freeze_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.freeze_host,
+ host='invalid_host')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
+ def test_thaw_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.thaw_host,
+ host='invalid_host')
diff --git a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
index 0f4e90f..74eb792 100644
--- a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
@@ -19,10 +19,11 @@
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
class VolumeSnapshotQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
@classmethod
def skip_checks(cls):
@@ -38,12 +39,24 @@
@classmethod
def resource_setup(cls):
super(VolumeSnapshotQuotasNegativeTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas, then set up the cleanup method
+ # to restore the quotas to their original values after the tests
+ # from this class are done. This is needed just in case Tempest is
+ # configured to use pre-provisioned projects/user accounts.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+ cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
+ cls.demo_tenant_id,
+ **cls.cleanup_quota_set)
+
cls.default_volume_size = CONF.volume.volume_size
cls.shared_quota_set = {'gigabytes': 3 * cls.default_volume_size,
'volumes': 1, 'snapshots': 1}
- # NOTE(gfidente): no need to restore original quota set
- # after the tests as they only work with tenant isolation.
cls.admin_quotas_client.update_quota_set(
cls.demo_tenant_id,
**cls.shared_quota_set)
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 375aacb..45060d0 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -59,9 +59,9 @@
volume = self.create_volume()
# Create backup
backup_name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
- backup = (self.create_backup(backup_client=self.admin_backups_client,
- volume_id=volume['id'],
- name=backup_name))
+ backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Export Backup
@@ -103,21 +103,22 @@
self.assertIn(new_id, [b['id'] for b in backups])
# Restore backup
- restore = self.admin_backups_client.restore_backup(
- backup['id'])['restore']
- self.addCleanup(self.admin_volume_client.delete_volume,
+ restore = self.backups_client.restore_backup(backup['id'])['restore']
+ self.addCleanup(self.volumes_client.delete_volume,
restore['volume_id'])
self.assertEqual(backup['id'], restore['backup_id'])
- waiters.wait_for_volume_resource_status(self.admin_volume_client,
- restore['volume_id'],
- 'available')
+
+ # When the restore operation is performed, backup['id']
+ # goes to the 'restoring' state, so we need to wait for
+ # backup['id'] to become 'available'.
+ waiters.wait_for_volume_resource_status(
+ self.backups_client, backup['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, restore['volume_id'], 'available')
# Verify if restored volume is there in volume list
- volumes = self.admin_volume_client.list_volumes()['volumes']
+ volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
- waiters.wait_for_volume_resource_status(self.admin_backups_client,
- import_backup['id'],
- 'available')
@decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
def test_volume_backup_reset_status(self):
@@ -126,12 +127,12 @@
# Create a backup
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
- backup = self.create_backup(backup_client=self.admin_backups_client,
- volume_id=volume['id'],
- name=backup_name)
+ backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Reset backup status to error
self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
status="error")
- waiters.wait_for_volume_resource_status(self.admin_backups_client,
+ waiters.wait_for_volume_resource_status(self.backups_client,
backup['id'], 'error')
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index 4018468..00a3375 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -17,7 +17,6 @@
from tempest import config
from tempest.lib import decorators
-
CONF = config.CONF
@@ -32,9 +31,16 @@
@classmethod
def resource_setup(cls):
super(AbsoluteLimitsTests, cls).resource_setup()
+
# Create a shared volume for tests
cls.volume = cls.create_volume()
+ @classmethod
+ def skip_checks(cls):
+ super(AbsoluteLimitsTests, cls).skip_checks()
+ if not CONF.auth.use_dynamic_credentials:
+ raise cls.skipException("Must use dynamic credentials.")
+
@decorators.idempotent_id('8e943f53-e9d6-4272-b2e9-adcf2f7c29ad')
def test_get_volume_absolute_limits(self):
# get volume limit for a tenant
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4108da5..75e81b7 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import waiters
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -43,6 +44,9 @@
transfer = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']
transfer_id = transfer['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
auth_key = transfer['auth_key']
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
@@ -81,6 +85,9 @@
# Create a volume transfer
transfer_id = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 07cfad5..c178272 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -117,6 +117,8 @@
self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'],
name=backup_name, force=True)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
self.assertEqual(backup_name, backup['name'])
@decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
@@ -132,6 +134,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
@@ -160,6 +164,8 @@
# Create volume and backup
volume = self.create_volume()
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Update backup and assert response body for update_backup method
update_kwargs = {
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 52114bc..93638b8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -163,6 +164,8 @@
backup = self.create_backup(volume_id=self.volume_origin['id'],
snapshot_id=snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
backup_info = self.backups_client.show_backup(backup['id'])['backup']
self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
diff --git a/tempest/clients.py b/tempest/clients.py
index 707127c..2a07be9 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -241,32 +241,32 @@
# if only api_v3 is enabled, all these clients should be available
if (CONF.volume_feature_enabled.api_v2 or
CONF.volume_feature_enabled.api_v3):
- self.backups_v2_client = self.volume_v2.BackupsClient()
+ self.backups_v2_client = self.volume_v3.BackupsClient()
self.encryption_types_v2_client = \
- self.volume_v2.EncryptionTypesClient()
+ self.volume_v3.EncryptionTypesClient()
self.snapshot_manage_v2_client = \
- self.volume_v2.SnapshotManageClient()
- self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
+ self.volume_v3.SnapshotManageClient()
+ self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
self.volume_capabilities_v2_client = \
- self.volume_v2.CapabilitiesClient()
- self.volume_manage_v2_client = self.volume_v2.VolumeManageClient()
- self.volume_qos_v2_client = self.volume_v2.QosSpecsClient()
- self.volume_services_v2_client = self.volume_v2.ServicesClient()
- self.volume_types_v2_client = self.volume_v2.TypesClient()
- self.volume_hosts_v2_client = self.volume_v2.HostsClient()
- self.volume_quotas_v2_client = self.volume_v2.QuotasClient()
+ self.volume_v3.CapabilitiesClient()
+ self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
+ self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
+ self.volume_services_v2_client = self.volume_v3.ServicesClient()
+ self.volume_types_v2_client = self.volume_v3.TypesClient()
+ self.volume_hosts_v2_client = self.volume_v3.HostsClient()
+ self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
self.volume_quota_classes_v2_client = \
- self.volume_v2.QuotaClassesClient()
+ self.volume_v3.QuotaClassesClient()
self.volume_scheduler_stats_v2_client = \
- self.volume_v2.SchedulerStatsClient()
+ self.volume_v3.SchedulerStatsClient()
self.volume_transfers_v2_client = \
- self.volume_v2.TransfersClient()
+ self.volume_v3.TransfersClient()
self.volume_v2_availability_zone_client = \
- self.volume_v2.AvailabilityZoneClient()
- self.volume_v2_limits_client = self.volume_v2.LimitsClient()
- self.volumes_v2_client = self.volume_v2.VolumesClient()
+ self.volume_v3.AvailabilityZoneClient()
+ self.volume_v2_limits_client = self.volume_v3.LimitsClient()
+ self.volumes_v2_client = self.volume_v3.VolumesClient()
self.volumes_v2_extension_client = \
- self.volume_v2.ExtensionsClient()
+ self.volume_v3.ExtensionsClient()
# Set default client for users that don't need explicit version
self.volumes_client_latest = self.volumes_v2_client
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 72ee715..a27425c 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -244,7 +244,7 @@
'each newline')
parser.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
- 'this file contains a seperate test '
+ 'this file contains a separate test '
'on each newline. This command'
'supports files created by the tempest'
'run ``--list-tests`` command')
diff --git a/tempest/lib/api_schema/response/compute/v2_63/__init__.py b/tempest/lib/api_schema/response/compute/v2_63/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_63/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
new file mode 100644
index 0000000..5cdaf54
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -0,0 +1,65 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_26 import servers as servers226
+from tempest.lib.api_schema.response.compute.v2_54 import servers as servers254
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
+# Nova microversion 2.63 adds 'trusted_image_certificates' (a list of
+# certificate IDs) to the server show, update, rebuild and list-detail responses.
+
+
+trusted_certs = {
+ 'type': ['array', 'null'],
+ 'minItems': 1,
+ 'maxItems': 50,
+ 'uniqueItems': True,
+ 'items': {
+ 'type': 'string',
+ 'minLength': 1
+ }
+}
+# list response schema wasn't changed for v2.63 so use v2.26
+list_servers = copy.deepcopy(servers226.list_servers)
+
+list_servers_detail = copy.deepcopy(servers254.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('trusted_image_certificates')
+
+rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers257.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+update_server = copy.deepcopy(servers254.update_server)
+update_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+update_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+get_server = copy.deepcopy(servers254.get_server)
+get_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+get_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
new file mode 100644
index 0000000..510fc36
--- /dev/null
+++ b/tempest/lib/common/thread.py
@@ -0,0 +1,29 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This disables relative module imports (forces absolute imports)
+from __future__ import absolute_import
+
+
+import six
+
+if six.PY2:
+ # module thread is removed in Python 3
+ from thread import get_ident # noqa: H237,F401
+
+else:
+ # On Python3 thread module has been deprecated and get_ident has been moved
+ # to threading module
+ from threading import get_ident # noqa: F401
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index f6e0e69..0314356 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -32,6 +32,7 @@
from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
+from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -51,7 +52,8 @@
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
{'min': '2.48', 'max': '2.53', 'schema': schemav248},
{'min': '2.54', 'max': '2.56', 'schema': schemav254},
- {'min': '2.57', 'max': None, 'schema': schemav257}]
+ {'min': '2.57', 'max': '2.62', 'schema': schemav257},
+ {'min': '2.63', 'max': None, 'schema': schemav263}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
diff --git a/tempest/lib/services/network/agents_client.py b/tempest/lib/services/network/agents_client.py
index 9bdf090..a0f832e 100644
--- a/tempest/lib/services/network/agents_client.py
+++ b/tempest/lib/services/network/agents_client.py
@@ -18,35 +18,62 @@
class AgentsClient(base.BaseNetworkClient):
def update_agent(self, agent_id, **kwargs):
- """Update agent."""
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526673
+ """Update an agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#update-agent
+ """
uri = '/agents/%s' % agent_id
return self.update_resource(uri, kwargs)
def show_agent(self, agent_id, **fields):
+ """Show details for an agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#show-agent-details
+ """
uri = '/agents/%s' % agent_id
return self.show_resource(uri, **fields)
def list_agents(self, **filters):
+ """List all agents.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#list-all-agents
+ """
uri = '/agents'
return self.list_resources(uri, **filters)
def list_routers_on_l3_agent(self, agent_id):
+ """List routers that an l3 agent hosts.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#list-routers-hosted-by-an-l3-agent
+ """
uri = '/agents/%s/l3-routers' % agent_id
return self.list_resources(uri)
def create_router_on_l3_agent(self, agent_id, **kwargs):
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526670
+ """Add a router to an l3 agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#schedule-router-to-an-l3-agent
+ """
uri = '/agents/%s/l3-routers' % agent_id
return self.create_resource(uri, kwargs, expect_empty_body=True)
def delete_router_from_l3_agent(self, agent_id, router_id):
+        """Remove a router from an l3 agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#remove-l3-router-from-an-l3-agent
+ """
uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
return self.delete_resource(uri)
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
index 0fac6bd..1fde79f 100644
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
@@ -61,7 +61,7 @@
return rest_client.ResponseBody(resp, body)
def delete_encryption_type(self, volume_type_id):
- """Delete the encryption type for the specified volume-type."""
+ """Delete the encryption type for the specified volume type."""
resp, body = self.delete(
"/types/%s/encryption/provider" % volume_type_id)
self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v3/encryption_types_client.py b/tempest/lib/services/volume/v3/encryption_types_client.py
index 7443a87..03de187 100644
--- a/tempest/lib/services/volume/v3/encryption_types_client.py
+++ b/tempest/lib/services/volume/v3/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 09036a4..22155a9 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -20,9 +20,15 @@
class ServicesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Services API requests"""
def list_services(self, **params):
+ """List all Cinder services.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-all-cinder-services
+ """
url = 'os-services'
if params:
url += '?%s' % urllib.urlencode(params)
@@ -31,3 +37,66 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def enable_service(self, **kwargs):
+ """Enable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#enable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/enable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_service(self, **kwargs):
+ """Disable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#disable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_log_reason(self, **kwargs):
+ """Disable scheduling for a volume service and log disabled reason.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#log-disabled-cinder-service-information
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable-log-reason', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def freeze_host(self, **kwargs):
+ """Freeze a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#freeze-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/freeze', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
+
+ def thaw_host(self, **kwargs):
+ """Thaw a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#thaw-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/thaw', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index 298925a..f79bcd8 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -176,11 +176,12 @@
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
- """Update metadata item for the snapshot."""
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
+ """Update metadata for the snapshot for a specific key.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-a-snapshot-s-metadata-for-a-specific-key
+ """
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.put(url, put_body)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 6d9d03a..13ecd15 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -21,7 +21,7 @@
class TypesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Types API requests"""
def is_resource_deleted(self, id):
try:
@@ -36,7 +36,7 @@
return 'volume-type'
def list_volume_types(self, **params):
- """List all the volume_types created.
+ """List all the volume types created.
For a full list of available parameters, please refer to the official
API reference:
@@ -52,7 +52,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type(self, volume_type_id):
- """Returns the details of a single volume_type.
+ """Returns the details of a single volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -78,7 +78,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type(self, volume_type_id):
- """Deletes the Specified Volume_type.
+ """Deletes the specified volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -89,11 +89,11 @@
return rest_client.ResponseBody(resp, body)
def list_volume_types_extra_specs(self, volume_type_id, **params):
- """List all the volume_types extra specs created.
+ """List all the volume type extra specs created.
- TODO: Current api-site doesn't contain this API description.
- After fixing the api-site, we need to fix here also for putting
- the link to api-site.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#show-all-extra-specifications-for-volume-type
"""
url = 'types/%s/extra_specs' % volume_type_id
if params:
@@ -105,7 +105,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
- """Returns the details of a single volume_type extra spec."""
+ """Returns the details of a single volume type extra spec."""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
resp, body = self.get(url)
body = json.loads(body)
@@ -113,10 +113,10 @@
return rest_client.ResponseBody(resp, body)
def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
- """Creates a new Volume_type extra spec.
+ """Creates new volume type extra specs.
- volume_type_id: Id of volume_type.
- extra_specs: A dictionary of values to be used as extra_specs.
+ :param volume_type_id: Id of volume type.
+ :param extra_specs: A dictionary of values to be used as extra_specs.
"""
url = "types/%s/extra_specs" % volume_type_id
post_body = json.dumps({'extra_specs': extra_specs})
@@ -126,7 +126,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
- """Deletes the Specified Volume_type extra spec."""
+ """Deletes the specified volume type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
volume_type_id, extra_spec_name))
self.expected_success(202, resp.status)
@@ -149,10 +149,10 @@
extra_specs):
"""Update a volume_type extra spec.
- volume_type_id: Id of volume_type.
- extra_spec_name: Name of the extra spec to be updated.
- extra_spec: A dictionary of with key as extra_spec_name and the
- updated value.
+ :param volume_type_id: Id of volume type.
+ :param extra_spec_name: Name of the extra spec to be updated.
+        :param extra_specs: A dictionary with key as extra_spec_name and the
+ updated value.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index c23b564..8a8c54e 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
@@ -56,6 +57,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 2d024e9..1564f25 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -68,6 +68,9 @@
waiters.wait_for_server_termination(self.servers_client, server['id'])
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
+ # Note: This test is being skipped based on 'public_network_id'.
+    # It is used in the create_floating_ip() method, which gets called
+    # from the get_server_ip() method
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index a1c8c53..3ed8a10 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -17,6 +17,11 @@
import subprocess
import tempfile
+from mock import patch
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
from tempest.cmd import workspace
from tempest.lib.common.utils import data_utils
from tempest.tests import base
@@ -140,3 +145,42 @@
self.addCleanup(shutil.rmtree, path, ignore_errors=True)
self.workspace_manager.register_new_workspace(name, path)
self.assertIsNotNone(self.workspace_manager.get_workspace(name))
+
+ def test_workspace_name_not_exists(self):
+ nonexistent_name = data_utils.rand_uuid()
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager._name_exists,
+ nonexistent_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n" %
+ nonexistent_name)
+
+ def test_workspace_name_already_exists(self):
+ duplicate_name = self.name
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ _workspace_name_exists,
+ duplicate_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace already exists with name: %s.\n"
+ % duplicate_name)
+
+ def test_workspace_manager_path_not_exist(self):
+ fake_path = "fake_path"
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager._validate_path,
+ fake_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "Path does not exist.\n")
+
+ def test_workspace_manager_list_workspaces(self):
+ listed = self.workspace_manager.list_workspaces()
+ self.assertEqual(1, len(listed))
+ self.assertIn(self.name, listed)
+ self.assertEqual(self.path, listed.get(self.name))
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index eb1e2b6..938d226 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -75,61 +75,76 @@
class TestInterfaceWaiters(base.TestCase):
- def setUp(self):
- super(TestInterfaceWaiters, self).setUp()
- self.client = mock.MagicMock()
- self.client.build_timeout = 1
- self.client.build_interval = 1
- def _port_down(self):
- return {'interfaceAttachment': {'port_state': 'DOWN'}}
+ build_timeout = 1.
+ build_interval = 1
+ port_down = {'interfaceAttachment': {'port_state': 'DOWN'}}
+ port_active = {'interfaceAttachment': {'port_state': 'ACTIVE'}}
- def _port_active(self):
- return {'interfaceAttachment': {'port_state': 'ACTIVE'}}
+ def mock_client(self, **kwargs):
+ return mock.MagicMock(
+ build_timeout=self.build_timeout,
+ build_interval=self.build_interval,
+ **kwargs)
def test_wait_for_interface_status(self):
- self.client.show_interface.side_effect = [self._port_down(),
- self._port_active()]
- with mock.patch.object(time, 'sleep') as sleep_mock:
- start_time = int(time.time())
- waiters.wait_for_interface_status(self.client, 'server_id',
- 'port_id', 'ACTIVE')
- end_time = int(time.time())
- self.assertLess(end_time, (start_time + self.client.build_timeout))
- sleep_mock.assert_called_once_with(self.client.build_interval)
+ show_interface = mock.Mock(
+ side_effect=[self.port_down, self.port_active])
+ client = self.mock_client(show_interface=show_interface)
+ self.patch('time.time', return_value=0.)
+ sleep = self.patch('time.sleep')
+
+ result = waiters.wait_for_interface_status(
+ client, 'server_id', 'port_id', 'ACTIVE')
+
+ self.assertIs(self.port_active['interfaceAttachment'], result)
+ show_interface.assert_has_calls([mock.call('server_id', 'port_id'),
+ mock.call('server_id', 'port_id')])
+ sleep.assert_called_once_with(client.build_interval)
def test_wait_for_interface_status_timeout(self):
- time_mock = self.patch('time.time')
- time_mock.side_effect = utils.generate_timeout_series(1)
+ show_interface = mock.MagicMock(return_value=self.port_down)
+ client = self.mock_client(show_interface=show_interface)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ sleep = self.patch('time.sleep')
- self.client.show_interface.return_value = self._port_down()
self.assertRaises(lib_exc.TimeoutException,
waiters.wait_for_interface_status,
- self.client, 'server_id', 'port_id', 'ACTIVE')
+ client, 'server_id', 'port_id', 'ACTIVE')
- def _one_interface(self):
- return {'interfaceAttachments': [{'port_id': 'port_one'}]}
+ show_interface.assert_has_calls([mock.call('server_id', 'port_id'),
+ mock.call('server_id', 'port_id')])
+ sleep.assert_called_once_with(client.build_interval)
- def _two_interfaces(self):
- return {'interfaceAttachments': [{'port_id': 'port_one'},
- {'port_id': 'port_two'}]}
+ one_interface = {'interfaceAttachments': [{'port_id': 'port_one'}]}
+ two_interfaces = {'interfaceAttachments': [{'port_id': 'port_one'},
+ {'port_id': 'port_two'}]}
def test_wait_for_interface_detach(self):
- self.client.list_interfaces.side_effect = [self._two_interfaces(),
- self._one_interface()]
- with mock.patch.object(time, 'sleep') as sleep_mock:
- start_time = int(time.time())
- waiters.wait_for_interface_detach(self.client, 'server_id',
- 'port_two')
- end_time = int(time.time())
- self.assertLess(end_time, (start_time + self.client.build_timeout))
- sleep_mock.assert_called_once_with(self.client.build_interval)
+ list_interfaces = mock.MagicMock(
+ side_effect=[self.two_interfaces, self.one_interface])
+ client = self.mock_client(list_interfaces=list_interfaces)
+ self.patch('time.time', return_value=0.)
+ sleep = self.patch('time.sleep')
+
+ result = waiters.wait_for_interface_detach(
+ client, 'server_id', 'port_two')
+
+ self.assertIs(self.one_interface['interfaceAttachments'], result)
+ list_interfaces.assert_has_calls([mock.call('server_id'),
+ mock.call('server_id')])
+ sleep.assert_called_once_with(client.build_interval)
def test_wait_for_interface_detach_timeout(self):
- time_mock = self.patch('time.time')
- time_mock.side_effect = utils.generate_timeout_series(1)
+ list_interfaces = mock.MagicMock(return_value=self.one_interface)
+ client = self.mock_client(list_interfaces=list_interfaces)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ sleep = self.patch('time.sleep')
- self.client.list_interfaces.return_value = self._one_interface()
self.assertRaises(lib_exc.TimeoutException,
waiters.wait_for_interface_detach,
- self.client, 'server_id', 'port_one')
+ client, 'server_id', 'port_one')
+
+ list_interfaces.assert_has_calls([mock.call('server_id'),
+ mock.call('server_id')])
+ sleep.assert_called_once_with(client.build_interval)
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index f638ba6..865767b 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -12,12 +12,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+import time
+
import mock
+from tempest.lib.common import thread
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
from tempest.tests import base
-from tempest.tests import utils
class TestTestUtils(base.TestCase):
@@ -78,47 +81,126 @@
42, test_utils.call_and_ignore_notfound_exc(m, *args, **kwargs))
m.assert_called_once_with(*args, **kwargs)
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_never_returns_true(self, m_time, m_sleep):
- def set_value(bool_value):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.side_effect = utils.generate_timeout_series(timeout)
- self.assertEqual(
- False, test_utils.call_until_true(set_value, timeout, sleep, False)
- )
- m_sleep.call_args_list = [mock.call(sleep)] * 2
- m_time.call_args_list = [mock.call()] * 2
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_returns_true(self, m_time, m_sleep):
- def set_value(bool_value=False):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.return_value = 0
- self.assertEqual(
- True, test_utils.call_until_true(set_value, timeout, sleep,
- bool_value=True)
- )
- self.assertEqual(0, m_sleep.call_count)
- # when logging cost time we need to acquire current time.
- self.assertEqual(2, m_time.call_count)
+class TestCallUntilTrue(base.TestCase):
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_returns_true_no_param(
- self, m_time, m_sleep):
- def set_value(bool_value=False):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.side_effect = utils.generate_timeout_series(timeout)
- self.assertEqual(
- False, test_utils.call_until_true(set_value, timeout, sleep)
- )
- m_sleep.call_args_list = [mock.call(sleep)] * 2
- m_time.call_args_list = [mock.call()] * 2
+ def test_call_until_true_when_true_at_first_call(self):
+ """func returns True at first call
+
+ """
+ self._test_call_until_true(return_values=[True],
+ duration=30.,
+ time_sequence=[10., 60.])
+
+ def test_call_until_true_when_true_before_timeout(self):
+ """func returns false at first call, then True before timeout
+
+ """
+ self._test_call_until_true(return_values=[False, True],
+ duration=30.,
+ time_sequence=[10., 39., 41.])
+
+ def test_call_until_true_when_never_true_before_timeout(self):
+ """func returns false, then false, just before timeout
+
+ """
+ self._test_call_until_true(return_values=[False, False],
+ duration=30.,
+ time_sequence=[10., 39., 41.])
+
+ def test_call_until_true_with_params(self):
+ """func is called using given parameters
+
+ """
+ self._test_call_until_true(return_values=[False, True],
+ duration=30.,
+ time_sequence=[10., 30., 60.],
+ args=(1, 2),
+ kwargs=dict(foo='bar', bar='foo'))
+
+ def _test_call_until_true(self, return_values, duration, time_sequence,
+ args=None, kwargs=None):
+ """Test call_until_true function
+
+ :param return_values: list of booleans values to be returned
+ each time given function is called. If any of these values
+ is not consumed by calling the function the test fails.
+ The list must contain a sequence of False items terminated
+ by a single True or False
+ :param duration: parameter passed to call_until_true function
+ (a floating point value).
+ :param time_sequence: sequence of time values returned by
+ mocked time.time function used to trigger call_until_true
+ behavior when handling timeout condition. The sequence must
+ contain the exact number of values expected to be consumed by
+ each time call_until_true calls time.time function.
+ :param args: sequence of positional arguments to be passed
+ to call_until_true function.
+ :param kwargs: sequence of named arguments to be passed
+ to call_until_true function.
+ """
+
+ # all values except the last are False
+ self.assertEqual([False] * len(return_values[:-1]), return_values[:-1])
+ # last value can be True or False
+ self.assertIn(return_values[-1], [True, False])
+
+ # GIVEN
+ func = mock.Mock(side_effect=return_values)
+ sleep = 10. # this value has no effect as time.sleep is being mocked
+ sleep_func = self.patch('time.sleep')
+ time_func = self._patch_time(time_sequence)
+ args = args or tuple()
+ kwargs = kwargs or dict()
+
+ # WHEN
+ result = test_utils.call_until_true(func, duration, sleep,
+ *args, **kwargs)
+ # THEN
+
+ # It must return last returned value
+ self.assertIs(return_values[-1], result)
+
+ self._test_func_calls(func, return_values, *args, **kwargs)
+ self._test_sleep_calls(sleep_func, return_values, sleep)
+ # The number of times time.time is called is not relevant as a
+ # requirement of call_until_true. What is instead relevant is that
+ # call_until_true use a mocked function to make the test reliable
+ # and the test actually provide the right sequence of numbers to
+ # reproduce the behavior has to be tested
+ self._assert_called_n_times(time_func, len(time_sequence))
+
+ def _patch_time(self, time_sequence):
+ # Iterator over time sequence
+ time_iterator = iter(time_sequence)
+ # Preserve original time.time() behavior for other threads
+ original_time = time.time
+ thread_id = thread.get_ident()
+
+ def mocked_time():
+ if thread.get_ident() == thread_id:
+ # Test thread => return time sequence values
+ return next(time_iterator)
+ else:
+ # Other threads => call original time function
+ return original_time()
+
+ return self.patch('time.time', side_effect=mocked_time)
+
+ def _test_func_calls(self, func, return_values, *args, **kwargs):
+ self._assert_called_n_times(func, len(return_values), *args, **kwargs)
+
+ def _test_sleep_calls(self, sleep_func, return_values, sleep):
+ # count first consecutive False
+ expected_count = 0
+ for value in return_values:
+ if value:
+ break
+ expected_count += 1
+ self._assert_called_n_times(sleep_func, expected_count, sleep)
+
+ def _assert_called_n_times(self, mock_func, expected_count, *args,
+ **kwargs):
+ calls = [mock.call(*args, **kwargs)] * expected_count
+ self.assertEqual(expected_count, mock_func.call_count)
+ mock_func.assert_has_calls(calls)
diff --git a/tempest/tests/lib/services/volume/v3/test_services_client.py b/tempest/tests/lib/services/volume/v3/test_services_client.py
new file mode 100644
index 0000000..f65228f
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_services_client.py
@@ -0,0 +1,214 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.services.volume.v3 import services_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServicesClient(base.BaseServiceTest):
+    """Unit tests for the volume v3 ServicesClient.
+
+    Each test mocks the RestClient HTTP verb underneath the client and
+    uses check_service_client_function (inherited from the test base
+    class -- confirm its exact contract there) to verify the URL and
+    payload the client builds and the canned response it returns.
+    """
+
+    # Canned body for GET os-services (list_services tests).
+    FAKE_SERVICE_LIST = {
+        "services": [
+            {
+                "status": "enabled",
+                "binary": "cinder-backup",
+                "zone": "nova",
+                "state": "up",
+                "updated_at": "2017-07-20T07:20:17.000000",
+                "host": "fake-host",
+                "disabled_reason": None
+            },
+            {
+                "status": "enabled",
+                "binary": "cinder-scheduler",
+                "zone": "nova",
+                "state": "up",
+                "updated_at": "2017-07-20T07:20:24.000000",
+                "host": "fake-host",
+                "disabled_reason": None
+            },
+            {
+                "status": "enabled",
+                "binary": "cinder-volume",
+                "zone": "nova",
+                "frozen": False,
+                "state": "up",
+                "updated_at": "2017-07-20T07:20:20.000000",
+                "host": "fake-host@lvm",
+                "replication_status": "disabled",
+                "active_backend_id": None,
+                "disabled_reason": None
+            }
+        ]
+    }
+
+    # Request body shared by the enable/disable family of tests.
+    FAKE_SERVICE_REQUEST = {
+        "host": "fake-host",
+        "binary": "cinder-volume"
+    }
+
+    # Canned response for enable_service; the disable tests tweak a
+    # deep copy of it per scenario.
+    FAKE_SERVICE_RESPONSE = {
+        "disabled": False,
+        "status": "enabled",
+        "host": "fake-host@lvm",
+        "service": "",
+        "binary": "cinder-volume",
+        "disabled_reason": None
+    }
+
+    def setUp(self):
+        super(TestServicesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = services_client.ServicesClient(fake_auth,
+                                                     'volume',
+                                                     'regionOne')
+
+    def _test_list_services(self, bytes_body=False,
+                            mock_args='os-services', **params):
+        # mock_args is the URL (including any query string) the client
+        # is expected to GET; params are forwarded to list_services.
+        self.check_service_client_function(
+            self.client.list_services,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SERVICE_LIST,
+            to_utf=bytes_body,
+            mock_args=[mock_args],
+            **params)
+
+    def _test_enable_service(self, bytes_body=False):
+        resp_body = self.FAKE_SERVICE_RESPONSE
+        kwargs = self.FAKE_SERVICE_REQUEST
+        payload = json.dumps(kwargs, sort_keys=True)
+        json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and actual
+        # payloads are guaranteed to be identical for mock_args assert check.
+        # NOTE(review): the lambda forwards a single positional argument
+        # only, so it assumes services_client calls json.dumps(body) with
+        # no extra arguments -- confirm against services_client.
+        with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+            mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+            self.check_service_client_function(
+                self.client.enable_service,
+                'tempest.lib.common.rest_client.RestClient.put',
+                resp_body,
+                to_utf=bytes_body,
+                mock_args=['os-services/enable', payload],
+                **kwargs)
+
+    def _test_disable_service(self, bytes_body=False):
+        # Disabled service response: no disabled_reason key at all.
+        resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+        resp_body.pop('disabled_reason')
+        resp_body['disabled'] = True
+        resp_body['status'] = 'disabled'
+        kwargs = self.FAKE_SERVICE_REQUEST
+        payload = json.dumps(kwargs, sort_keys=True)
+        json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and actual
+        # payloads are guaranteed to be identical for mock_args assert check.
+        with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+            mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+            self.check_service_client_function(
+                self.client.disable_service,
+                'tempest.lib.common.rest_client.RestClient.put',
+                resp_body,
+                to_utf=bytes_body,
+                mock_args=['os-services/disable', payload],
+                **kwargs)
+
+    def _test_disable_log_reason(self, bytes_body=False):
+        # disable-log-reason additionally carries a disabled_reason in
+        # both the request and the response.
+        resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+        resp_body['disabled_reason'] = "disabled for test"
+        resp_body['disabled'] = True
+        resp_body['status'] = 'disabled'
+        kwargs = copy.deepcopy(self.FAKE_SERVICE_REQUEST)
+        kwargs.update({"disabled_reason": "disabled for test"})
+        payload = json.dumps(kwargs, sort_keys=True)
+        json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and actual
+        # payloads are guaranteed to be identical for mock_args assert check.
+        with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+            mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+            self.check_service_client_function(
+                self.client.disable_log_reason,
+                'tempest.lib.common.rest_client.RestClient.put',
+                resp_body,
+                to_utf=bytes_body,
+                mock_args=['os-services/disable-log-reason', payload],
+                **kwargs)
+
+    def _test_freeze_host(self, bytes_body=False):
+        # freeze returns an empty body; bytes_body is forwarded
+        # positionally (presumably as to_utf -- confirm against base).
+        kwargs = {'host': 'host1@lvm'}
+        self.check_service_client_function(
+            self.client.freeze_host,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            **kwargs)
+
+    def _test_thaw_host(self, bytes_body=False):
+        # thaw returns an empty body; bytes_body is forwarded
+        # positionally (presumably as to_utf -- confirm against base).
+        kwargs = {'host': 'host1@lvm'}
+        self.check_service_client_function(
+            self.client.thaw_host,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            **kwargs)
+
+    def test_list_services_with_str_body(self):
+        self._test_list_services()
+
+    def test_list_services_with_bytes_body(self):
+        self._test_list_services(bytes_body=True)
+
+    def test_list_services_with_params(self):
+        mock_args = 'os-services?host=fake-host'
+        self._test_list_services(mock_args=mock_args, host='fake-host')
+
+    def test_enable_service_with_str_body(self):
+        self._test_enable_service()
+
+    def test_enable_service_with_bytes_body(self):
+        self._test_enable_service(bytes_body=True)
+
+    def test_disable_service_with_str_body(self):
+        self._test_disable_service()
+
+    def test_disable_service_with_bytes_body(self):
+        self._test_disable_service(bytes_body=True)
+
+    def test_disable_log_reason_with_str_body(self):
+        self._test_disable_log_reason()
+
+    def test_disable_log_reason_with_bytes_body(self):
+        self._test_disable_log_reason(bytes_body=True)
+
+    def test_freeze_host_with_str_body(self):
+        self._test_freeze_host()
+
+    def test_freeze_host_with_bytes_body(self):
+        self._test_freeze_host(bytes_body=True)
+
+    def test_thaw_host_with_str_body(self):
+        self._test_thaw_host()
+
+    def test_thaw_host_with_bytes_body(self):
+        self._test_thaw_host(bytes_body=True)