Merge "Cleanup networks resources at test level"
diff --git a/.zuul.yaml b/.zuul.yaml
index 8dcb935..8ab3028 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,7 +1,13 @@
- job:
name: devstack-tempest
parent: devstack
- description: Base Tempest job.
+ nodeset: openstack-single-node
+ description: |
+ Base Tempest job.
+
+      This Tempest job provides the base for both the single and multi-node
+      test setup. To run a multi-node test, inherit from devstack-tempest and
+      set the nodeset to a multi-node one.
required-projects:
- openstack/tempest
timeout: 7200
@@ -10,6 +16,11 @@
vars:
devstack_services:
tempest: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute:
+ min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
test_results_stage_name: 'test_results'
zuul_copy_output:
'{{ devstack_base_dir }}/tempest/etc/tempest.conf': 'logs'
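For reference, min_compute_nodes above is an Ansible/Jinja expression: it
counts the hosts in the inventory's compute group, falling back to a
one-element list (just the controller) when no compute group is defined. A
minimal sketch of how it evaluates, assuming the standard multinode nodesets
where the controller is also a member of the compute group:

    # openstack-two-node: groups['compute'] == ['controller', 'compute1']
    min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"  # -> 2

    # single-node: no 'compute' group, so the default list is used
    min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"  # -> 1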
@@ -81,6 +92,36 @@
# without Swift, c-bak cannot run (in the Gate at least)
c-bak: false
+- job:
+ name: tempest-multinode-full
+ parent: devstack-tempest
+ nodeset: openstack-two-node
+ # Until the devstack changes are backported, only run this on master
+ branches:
+ - master
+ description: |
+ Base multinode integration test with Neutron networking and py27.
+      Former names for this job were:
+ * neutron-tempest-multinode-full
+ * legacy-tempest-dsvm-neutron-multinode-full
+ * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+      This job includes two nodes: a controller / tempest node plus a
+      subnode. It can be used with different topologies, as long as a
+      controller node and a tempest node exist (see the sketch below).
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE: False
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+ group-vars:
+ peers:
+ devstack_localrc:
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+
- nodeset:
name: openstack-bionic-node
nodes:
@@ -101,6 +142,19 @@
Base integration test with Neutron networking and py36.
voting: false
+# TODO(gmann): needs to migrate this to zuulv3
+- job:
+ name: tempest-scenario-multinode-lvm-multibackend
+ parent: legacy-dsvm-base-multinode
+ run: playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
+ post-run: playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
+ timeout: 10800
+ required-projects:
+ - openstack-infra/devstack-gate
+ - openstack/neutron
+ - openstack/tempest
+ nodeset: ubuntu-xenial-2-node
+
- job:
name: tempest-full-queens
parent: tempest-full
@@ -199,6 +253,19 @@
- openstack/zaqar-tempest-plugin
- openstack/zun-tempest-plugin
+- job:
+ name: tempest-cinder-v2-api
+ parent: devstack-tempest
+ branches:
+ - master
+ description: |
+      This job runs the cinder API tests against the v2 endpoint.
+ vars:
+ tox_envlist: all
+ tempest_test_regex: api.*volume
+ devstack_localrc:
+ TEMPEST_VOLUME_TYPE: volumev2
+
- project:
check:
jobs:
@@ -258,12 +325,28 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - tempest-multinode-full:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
- tempest-tox-plugin-sanity-check
- gate:
- jobs:
- - nova-multiattach
- experimental:
- jobs:
+ - tempest-scenario-multinode-lvm-multibackend:
+ voting: false
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
- nova-cells-v1:
irrelevant-files:
- ^(test-|)requirements.txt$
@@ -274,6 +357,11 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ gate:
+ jobs:
+ - nova-multiattach
+ experimental:
+ jobs:
- nova-live-migration:
irrelevant-files:
- ^(test-|)requirements.txt$
@@ -284,6 +372,16 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - tempest-cinder-v2-api:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
periodic-stable:
jobs:
- tempest-full-queens
diff --git a/README.rst b/README.rst
index 044ae09..2243536 100644
--- a/README.rst
+++ b/README.rst
@@ -212,18 +212,6 @@
For more information on these options and details about stestr, please see the
`stestr documentation <http://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
-Python 2.6
-----------
-
-Starting in the Kilo release the OpenStack services dropped all support for
-python 2.6. This change has been mirrored in Tempest, starting after the
-tempest-2 tag. This means that proposed changes to Tempest which only fix
-python 2.6 compatibility will be rejected, and moving forward more features not
-present in python 2.6 will be used. If you're running your OpenStack services
-on an earlier release with python 2.6 you can easily run Tempest against it
-from a remote system running python 2.7. (or deploy a cloud guest in your cloud
-that has python 2.7)
-
Python 3.x
----------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f562850..fecf98a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -80,6 +80,14 @@
library
+Support Policy
+--------------
+
+.. toctree::
+ :maxdepth: 2
+
+ stable_branch_support_policy
+
Indices and tables
==================
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 3bc1d0c..ea868ae 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -354,6 +354,10 @@
.. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+ * `2.49`_
+
+ .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+
* `2.54`_
.. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id4
@@ -362,9 +366,17 @@
.. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+ * `2.57`_
+
+ .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
+
* `2.60`_
- .. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54
+ .. _2.60: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-queens
+
+ * `2.63`_
+
+ .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
* Volume
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
new file mode 100644
index 0000000..87e3ad1
--- /dev/null
+++ b/doc/source/stable_branch_support_policy.rst
@@ -0,0 +1,30 @@
+Stable Branch Support Policy
+============================
+
+Since the `Extended Maintenance policy`_ for stable branches was adopted,
+OpenStack projects will keep stable branches around after a "stable" or
+"maintained" period for a phase of indeterminate length called "Extended
+Maintenance". Prior to this resolution, Tempest supported all stable branches
+which were supported upstream. That policy does not scale under the new model,
+as Tempest would be responsible for gating proposed changes against an
+ever-increasing number of branches. Therefore, due to resource constraints,
+Tempest will only provide support for branches in the "Maintained" phase from
+the documented `Support Phases`_. When a branch moves from the *Maintained* to
+the *Extended Maintenance* phase, Tempest will tag the removal of support for
+that branch as it has in the past when a branch goes end of life.
+
+The expectation for *Extended Maintenance* phase branches is that they will
+continue running Tempest during that phase of support. Since the REST APIs are
+stable interfaces across release boundaries, branches in these phases should
+run Tempest from master as long as possible. But, because we won't be actively
+testing branches in these phases, it's possible that we'll introduce changes
+to Tempest on master which will break support on *Extended Maintenance* phase
+branches. When this happens the expectation for those branches is to either
+switch to running Tempest from a tag with support for the branch, or blacklist
+a newly introduced test (if that is the cause of the issue). Tempest will not
+be creating stable branches to support *Extended Maintenance* phase branches;
+the burden is on the *Extended Maintenance* phase branch maintainers, not the
+Tempest project, to support that branch.
+
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Support Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
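To make the policy concrete, a branch in *Extended Maintenance* might pin
Tempest to a tag or skip a breaking test roughly as follows (the tag, path,
and test name here are illustrative, not recommendations):

    # Run Tempest from the last tag that supported the branch
    git -C /opt/stack/tempest checkout 17.2.0

    # Or skip a newly introduced breaking test via a blacklist file
    echo 'tempest.api.compute.test_new_breaking_case' >> blacklist.txt
    tempest run --blacklist-file blacklist.txt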
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index a684984..01155a8 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -3,7 +3,7 @@
# avoid zuul retrying on legitimate failures.
- hosts: all
roles:
- - run-devstack
+ - orchestrate-devstack
 # We run tests only on one node, regardless of how many nodes are in the system
- hosts: tempest
diff --git a/playbooks/post-tempest.yaml b/playbooks/post-tempest.yaml
index 4dde2c9..6e0bcad 100644
--- a/playbooks/post-tempest.yaml
+++ b/playbooks/post-tempest.yaml
@@ -1,4 +1,4 @@
-- hosts: all
+- hosts: tempest
become: true
roles:
- role: fetch-subunit-output
diff --git a/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml b/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
new file mode 100644
index 0000000..e07f551
--- /dev/null
+++ b/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
@@ -0,0 +1,15 @@
+- hosts: primary
+ tasks:
+
+ - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
+ synchronize:
+ src: '{{ ansible_user_dir }}/workspace/'
+ dest: '{{ zuul.executor.log_root }}'
+ mode: pull
+ copy_links: true
+ verify_host: true
+ rsync_opts:
+ - --include=/logs/**
+ - --include=*/
+ - --exclude=*
+ - --prune-empty-dirs
diff --git a/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml b/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
new file mode 100644
index 0000000..03f64f9
--- /dev/null
+++ b/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
@@ -0,0 +1,65 @@
+- hosts: primary
+ name: Autoconverted job tempest-scenario-multinode-lvm-multibackend
+    from old job gate-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend-ubuntu-xenial-nv
+ tasks:
+
+ - name: Ensure legacy workspace directory
+ file:
+ path: '{{ ansible_user_dir }}/workspace'
+ state: directory
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ cat > clonemap.yaml << EOF
+ clonemap:
+ - name: openstack-infra/devstack-gate
+ dest: devstack-gate
+ EOF
+ /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
+ git://git.openstack.org \
+ openstack-infra/devstack-gate
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ cat << 'EOF' >>"/tmp/dg-local.conf"
+ [[local|localrc]]
+ ENABLE_IDENTITY_V2=False
+ TEMPEST_USE_TEST_ACCOUNTS=True
+          # Enable multiple LVM backends to run multi-backend slow scenario
+          # tests. Note: the multi-backend experimental job excludes them.
+ CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,lvm:lvmdriver-2
+
+ EOF
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ export PYTHONUNBUFFERED=true
+ export DEVSTACK_GATE_TEMPEST=1
+ # Run scenario and nova migration tests with concurrency 2
+ export DEVSTACK_GATE_TEMPEST_REGEX='(^tempest\.(scenario|api\.compute\.admin\.test_(live_|)migration))'
+ export TEMPEST_CONCURRENCY=2
+ export DEVSTACK_GATE_NEUTRON=1
+ export DEVSTACK_GATE_TLSPROXY=1
+ export BRANCH_OVERRIDE=default
+ if [ "$BRANCH_OVERRIDE" != "default" ] ; then
+ export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
+ fi
+ export DEVSTACK_GATE_TOPOLOGY="multinode"
+
+ cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
+ ./safe-devstack-vm-gate-wrap.sh
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
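For reference, the DEVSTACK_GATE_TEMPEST_REGEX above selects every scenario
test plus the compute admin migration tests; for example, it matches test
names such as:

    tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern
    tempest.api.compute.admin.test_live_migration.LiveMigrationTest
    tempest.api.compute.admin.test_migrations.MigrationsAdminTest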
diff --git a/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml b/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
index fc061bc..9ae46fd 100644
--- a/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
+++ b/releasenotes/notes/16/16.0.0-add-content-type-without-spaces-b2c9b91b257814f3.yaml
@@ -6,4 +6,4 @@
     The lack of these additional content-types will cause defcore tests
     to fail for OpenStack public clouds which use the tomcat module in
     the api gateway. The additions are ``application/json;charset=utf-8``,
- ``text/html;charset=utf-8``,``text/plain;charset=utf-8``
\ No newline at end of file
+ ``text/html;charset=utf-8``, ``text/plain;charset=utf-8``
diff --git a/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml b/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
index 73900ca..e9c3694 100644
--- a/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
+++ b/releasenotes/notes/16/16.0.0-add-tempest-run-combine-option-e94c1049ba8985d5.yaml
@@ -1,6 +1,6 @@
---
features:
- |
- Adds a new cli option to tempest run, --combine, which is used to indicate
- you want the subunit stream output combined with the previous run's in
- the testr repository
+ Adds a new cli option to tempest run, ``--combine``, which is used
+ to indicate you want the subunit stream output combined with the
+ previous run's in the testr repository
diff --git a/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
index 9d7102f..5b4a96d 100644
--- a/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
+++ b/releasenotes/notes/16/16.0.0-remove-deprecated-allow_port_security_disabled-option-d0ffaeb2e7817707.yaml
@@ -1,5 +1,5 @@
---
upgrade:
- |
- The deprecated config option 'allow_port_security_disabled' from compute_feature_enabled
- group has been removed.
+ The deprecated config option ``allow_port_security_disabled`` from
+ ``compute_feature_enabled`` group has been removed.
diff --git a/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml b/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
index b4e4dd1..c8b0ca8 100644
--- a/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
+++ b/releasenotes/notes/16/16.0.0-remove-deprecated-compute-validation-config-options-part-2-5cd17b6e0e6cb8a3.yaml
@@ -8,4 +8,4 @@
- ``compute.ssh_user`` (available as ``validation.image_ssh_user``)
- ``scenario.ssh_user`` (available as ``validation.image_ssh_user``)
- ``compute.network_for_ssh`` (available as ``validation.network_for_ssh``)
- - ``compute.ping_timeout `` (available as ``validation.ping_timeout``)
+ - ``compute.ping_timeout`` (available as ``validation.ping_timeout``)
diff --git a/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
new file mode 100644
index 0000000..03d0ae8
--- /dev/null
+++ b/releasenotes/notes/add-extra-apis-to-volume-v3-services-client-bf9b235cf5a611fe.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add ``enable_service``, ``disable_service`` , ``disable_log_reason``,
+ ``freeze_host`` and ``thaw_host`` API endpoints to volume v3
+ ``services_client``.
diff --git a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
index 403bbad..145e7dd 100644
--- a/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
+++ b/releasenotes/notes/add-load-list-cmd-35a4a2e6ea0a36fd.yaml
@@ -1,7 +1,7 @@
---
features:
- |
- Adds a new cli option to tempest run, --load-list <list-file>
+ Adds a new cli option to tempest run, ``--load-list <list-file>``
to specify target tests to run from a list-file. The list-file
- supports the output format of the tempest run --list-tests
+ supports the output format of the tempest run ``--list-tests``
command.
diff --git a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
index b54ee8b..19d47d1 100644
--- a/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
+++ b/releasenotes/notes/add-port-profile-config-option-2610b2fa67027960.yaml
@@ -1,11 +1,9 @@
---
-prelude: >
- When using OVS HW offload feature we need to create
- Neutron port with a certain capability. This is done
- by creating Neutron port with binding profile. To be
- able to test this we need profile capability support
- in Tempest as well.
features:
- A new config option 'port_profile' is added to the section
'network' to specify capabilities of the port.
- By default this is set to {}.
+    By default this is set to {}. When using the OVS HW offload
+    feature we need to create a Neutron port with a certain
+    capability. This is done by creating a Neutron port with a
+    binding profile. To be able to test this we need profile
+    capability support in Tempest as well.
diff --git a/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
index 8fdf4f0..abd2610 100644
--- a/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
+++ b/releasenotes/notes/add-save-state-option-5ea67858cbaca969.yaml
@@ -1,4 +1,5 @@
---
features:
- |
- Add --save-state option to allow saving state of cloud before tempest run.
+ Add ``--save-state`` option to allow saving state of cloud before
+ tempest run.
diff --git a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
index 406e282..6c44ba0 100644
--- a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
+++ b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
@@ -3,5 +3,5 @@
- |
Add extension API show quota details to network quotas_client library.
This feature enables the possibility to show a quota set for a specified
- project that includes the quota’s used, limit and reserved counts for per
- resource
+ project that includes the quota's used, limit and reserved counts per
+ resource.
diff --git a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
index e3443c8..2a0a86c 100644
--- a/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
+++ b/releasenotes/notes/cli-tests-v3fixes-fb38189cefd64213.yaml
@@ -2,8 +2,8 @@
other:
- |
The CLIClient class, when it calls a command line client, uses
- --os-project-name instead of --os-tenant-name for the project, and
- passes --os-identity-api-version (default empty).
+ ``--os-project-name`` instead of ``--os-tenant-name`` for the
+ project, and passes ``--os-identity-api-version`` (default empty).
All CLI clients still available in supported releases of OpenStack
- which are wrapped by the cmd_with_auth() method support those
+ which are wrapped by the ``cmd_with_auth()`` method support those
switches.
diff --git a/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml b/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
index 265853d..0f9a0f6 100644
--- a/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
+++ b/releasenotes/notes/tempest-run-fix-updates-564b41706decbba1.yaml
@@ -1,8 +1,8 @@
---
features:
- |
- Adds a new CLI arg in tempest run, --black-regex, which is a regex to
- exclude the tests that match it.
+ Adds a new CLI arg in tempest run, ``--black-regex``, which is a
+ regex to exclude the tests that match it.
fixes:
- |
Fixes tempest run CLI args mutually exclusive behavior which should not
diff --git a/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml b/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
index ec21098..c69ed50 100644
--- a/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
+++ b/releasenotes/notes/tempest-workspace-delete-directory-feature-74d6d157a5a05561.yaml
@@ -1,5 +1,5 @@
---
features:
- |
- Added tempest workspace remove --name <workspace_name> --rmdir
+ Added tempest workspace remove ``--name <workspace_name> --rmdir``
feature to delete the workspace directory as well as entry.
diff --git a/tempest/README.rst b/tempest/README.rst
index 62821de..a5f4a92 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -12,12 +12,12 @@
 and guidelines. Below is the overview of the Tempest repository structure
to make this clear.
- .. code-block:: console
+.. code-block:: console
- tempest/
- api/ - API tests
- scenario/ - complex scenario tests
- tests/ - unit tests for Tempest internals
+ tempest/
+ api/ - API tests
+ scenario/ - complex scenario tests
+ tests/ - unit tests for Tempest internals
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index c2bdf7e..df534bc 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -46,13 +46,19 @@
         # tenant; most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.quotas_client.tenant_id
- cls.default_quota_set = set(('injected_file_content_bytes',
- 'metadata_items', 'injected_files',
- 'ram', 'floating_ips',
- 'fixed_ips', 'key_pairs',
- 'injected_file_path_bytes',
- 'instances', 'security_group_rules',
- 'cores', 'security_groups'))
+ cls.default_quota_set = set(('metadata_items', 'ram', 'key_pairs',
+ 'instances', 'cores',
+ 'server_group_members', 'server_groups'))
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['fixed_ips', 'floating_ips',
+ 'security_group_rules',
+ 'security_groups'])
+ if cls.is_requested_microversion_compatible('2.56'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'injected_files'])
@decorators.idempotent_id('3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d')
def test_get_default_quotas(self):
@@ -69,13 +75,19 @@
# Admin can update all the resource quota limits for a tenant
default_quota_set = self.adm_client.show_default_quota_set(
self.demo_tenant_id)['quota_set']
- new_quota_set = {'injected_file_content_bytes': 20480,
- 'metadata_items': 256, 'injected_files': 10,
- 'ram': 10240, 'floating_ips': 20, 'fixed_ips': 10,
- 'key_pairs': 200, 'injected_file_path_bytes': 512,
- 'instances': 20, 'security_group_rules': 20,
- 'cores': 2, 'security_groups': 20,
- 'server_groups': 20, 'server_group_members': 20}
+ new_quota_set = {'metadata_items': 256, 'ram': 10240,
+ 'key_pairs': 200, 'instances': 20,
+ 'server_groups': 20,
+ 'server_group_members': 20, 'cores': 2}
+ if self.is_requested_microversion_compatible('2.35'):
+ new_quota_set.update({'fixed_ips': 10, 'floating_ips': 20,
+ 'security_group_rules': 20,
+ 'security_groups': 20})
+ if self.is_requested_microversion_compatible('2.56'):
+ new_quota_set.update({'injected_file_content_bytes': 20480,
+ 'injected_file_path_bytes': 512,
+ 'injected_files': 10})
+
# Update limits for all quota resources
quota_set = self.adm_client.update_quota_set(
self.demo_tenant_id,
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 5ef7ee4..f90ff92 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -22,12 +22,12 @@
CONF = config.CONF
-class QuotasAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
+class QuotasAdminNegativeTestBase(base.BaseV2ComputeAdminTest):
force_tenant_isolation = True
@classmethod
def setup_clients(cls):
- super(QuotasAdminNegativeTestJSON, cls).setup_clients()
+ super(QuotasAdminNegativeTestBase, cls).setup_clients()
cls.client = cls.os_primary.quotas_client
cls.adm_client = cls.os_admin.quotas_client
cls.sg_client = cls.security_groups_client
@@ -35,7 +35,7 @@
@classmethod
def resource_setup(cls):
- super(QuotasAdminNegativeTestJSON, cls).resource_setup()
+ super(QuotasAdminNegativeTestBase, cls).resource_setup()
# NOTE(afazekas): these test cases should always create and use a new
         # tenant; most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.client.tenant_id
@@ -51,6 +51,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
**{quota_item: default_quota_value})
+
+class QuotasAdminNegativeTest(QuotasAdminNegativeTestBase):
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('733abfe8-166e-47bb-8363-23dbd7ff3476')
def test_update_quota_normal_user(self):
@@ -85,6 +88,10 @@
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server)
+
+class QuotasSecurityGroupAdminNegativeTest(QuotasAdminNegativeTestBase):
+ max_microversion = '2.35'
+
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
index f2f3b57..bca6a22 100644
--- a/tempest/api/compute/admin/test_security_group_default_rules.py
+++ b/tempest/api/compute/admin/test_security_group_default_rules.py
@@ -23,6 +23,7 @@
class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
+ max_microversion = '2.35'
@classmethod
# TODO(GMann): Once Bug# 1311500 is fixed, these test can run
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index ff9caa3..f0178aa 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -20,6 +20,7 @@
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
+ max_microversion = '2.35'
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 975728c..d0c1973 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -108,6 +108,35 @@
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
+ cls._check_depends_on_nova_network()
+
+ @classmethod
+ def _check_depends_on_nova_network(cls):
+ # Since nova-network APIs were removed from Nova in the Rocky release,
+ # determine, based on the max version from the version document, if
+ # the compute API is >Queens and if so, skip tests that rely on
+ # nova-network.
+ if not getattr(cls, 'depends_on_nova_network', False):
+ return
+ versions = cls.versions_client.list_versions()['versions']
+ # Find the v2.1 version which will tell us our max version for the
+ # compute API we're testing against.
+ for version in versions:
+ if version['id'] == 'v2.1':
+ max_version = api_version_request.APIVersionRequest(
+ version['version'])
+ break
+ else:
+ LOG.warning(
+ 'Unable to determine max v2.1 compute API version: %s',
+ versions)
+ return
+
+ # The max compute API version in Queens is 2.60 so we cap
+ # at that version.
+ queens = api_version_request.APIVersionRequest('2.60')
+ if max_version > queens:
+ raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
@@ -501,7 +530,7 @@
# is already detached.
pass
- def attach_volume(self, server, volume, device=None):
+ def attach_volume(self, server, volume, device=None, tag=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -510,10 +539,14 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
+ :param tag: Optional device role tag to apply to the volume.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
+ if tag:
+ attach_kwargs['tag'] = tag
+
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index 54a6da8..49125d1 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -22,6 +22,7 @@
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
+ max_microversion = '2.35'
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index b0d527c..ff8ed61 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -19,6 +19,7 @@
from tempest.api.compute import base
from tempest.common import utils
from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -31,18 +32,11 @@
LOG = logging.getLogger(__name__)
-class DeviceTaggingTest(base.BaseV2ComputeTest):
-
- min_microversion = '2.32'
- # NOTE(mriedem): max_version looks odd but it's actually correct. Due to a
- # bug in the 2.32 microversion, tags on block devices only worked with the
- # 2.32 microversion specifically. And tags on networks only worked between
- # 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
- max_microversion = '2.32'
+class DeviceTaggingBase(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
- super(DeviceTaggingTest, cls).skip_checks()
+ super(DeviceTaggingBase, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron is required')
if not CONF.validation.run_validation:
@@ -54,7 +48,7 @@
@classmethod
def setup_clients(cls):
- super(DeviceTaggingTest, cls).setup_clients()
+ super(DeviceTaggingBase, cls).setup_clients()
cls.networks_client = cls.os_primary.networks_client
cls.ports_client = cls.os_primary.ports_client
cls.subnets_client = cls.os_primary.subnets_client
@@ -64,7 +58,57 @@
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
- super(DeviceTaggingTest, cls).setup_credentials()
+ super(DeviceTaggingBase, cls).setup_credentials()
+
+ def verify_metadata_from_api(self, server, ssh_client, verify_method):
+ md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ LOG.info('Attempting to verify tagged devices in server %s via '
+ 'the metadata service: %s', server['id'], md_url)
+
+ def get_and_verify_metadata():
+ try:
+ ssh_client.exec_command('curl -V')
+ except exceptions.SSHExecCommandFailed:
+ if not CONF.compute_feature_enabled.config_drive:
+ raise self.skipException('curl not found in guest '
+ 'and config drive is '
+ 'disabled')
+ LOG.warning('curl was not found in the guest, device '
+ 'tagging metadata was not checked in the '
+ 'metadata API')
+ return True
+ cmd = 'curl %s' % md_url
+ md_json = ssh_client.exec_command(cmd)
+ return verify_method(md_json)
+        # NOTE(gmann): Keep refreshing the metadata info until the metadata
+        # cache is refreshed. To be safe, we wait in a loop of build_interval
+        # up to build_timeout. verify_method() above will return True once
+        # all metadata verification is done as expected.
+ if not test_utils.call_until_true(get_and_verify_metadata,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise exceptions.TimeoutException('Timeout while verifying '
+ 'metadata on server.')
+
+ def verify_metadata_on_config_drive(self, server, ssh_client,
+ verify_method):
+ LOG.info('Attempting to verify tagged devices in server %s via '
+ 'the config drive.', server['id'])
+ ssh_client.mount_config_drive()
+ cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
+ md_json = ssh_client.exec_command(cmd_md)
+ verify_method(md_json)
+ ssh_client.unmount_config_drive()
+
+
+class TaggedBootDevicesTest(DeviceTaggingBase):
+
+ min_microversion = '2.32'
+ # NOTE(mriedem): max_version looks odd but it's actually correct. Due to a
+ # bug in the 2.32 microversion, tags on block devices only worked with the
+ # 2.32 microversion specifically. And tags on networks only worked between
+ # 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
+ max_microversion = '2.32'
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
@@ -79,20 +123,24 @@
if d['mac'] == self.net_2_200_mac:
self.assertEqual(d['tags'], ['net-2-200'])
- # A hypervisor may present multiple paths to a tagged disk, so
- # there may be duplicated tags in the metadata, use set() to
- # remove duplicated tags.
- # Some hypervisors might report devices with no tags as well.
- found_devices = [d['tags'][0] for d in md_dict['devices']
- if d.get('tags')]
+ # A hypervisor may present multiple paths to a tagged disk, so
+ # there may be duplicated tags in the metadata, use set() to
+ # remove duplicated tags.
+ # Some hypervisors might report devices with no tags as well.
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
+ try:
self.assertEqual(set(found_devices), set(['port-1', 'port-2',
'net-1', 'net-2-100',
'net-2-200', 'boot',
'other']))
+ return True
+ except Exception:
+ return False
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
- def test_device_tagging(self):
+ def test_tagged_boot_devices(self):
# Create volumes
# The create_volume methods waits for the volumes to be available and
# the base class will clean them up on tearDown.
@@ -134,7 +182,6 @@
self.addCleanup(self.ports_client.delete_port, self.port2['id'])
# Create server
- admin_pass = data_utils.rand_password()
config_drive_enabled = CONF.compute_feature_enabled.config_drive
validation_resources = self.get_test_validation_resources(
self.os_primary)
@@ -144,7 +191,6 @@
wait_until='ACTIVE',
validation_resources=validation_resources,
config_drive=config_drive_enabled,
- adminPass=admin_pass,
name=data_utils.rand_name('device-tagging-server'),
networks=[
# Validation network for ssh
@@ -209,11 +255,10 @@
self.addCleanup(self.delete_server, server['id'])
server = self.servers_client.show_server(server['id'])['server']
- self.ssh_client = remote_client.RemoteClient(
+ ssh_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
- admin_pass,
- validation_resources['keypair']['private_key'],
+ pkey=validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
@@ -233,46 +278,107 @@
self.assertTrue(self.net_2_100_mac)
self.assertTrue(self.net_2_200_mac)
- # Verify metadata from metadata service
+ # Verify metadata from metadata API
if CONF.compute_feature_enabled.metadata_service:
- md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
- LOG.info('Attempting to verify tagged devices in server %s via '
- 'the metadata service: %s', server['id'], md_url)
-
- def get_and_verify_metadata():
- try:
- self.ssh_client.exec_command('curl -V')
- except exceptions.SSHExecCommandFailed:
- if not CONF.compute_feature_enabled.config_drive:
- raise self.skipException('curl not found in guest '
- 'and config drive is '
- 'disabled')
- LOG.warning('curl was not found in the guest, device '
- 'tagging metadata was not checked in the '
- 'metadata API')
- return True
- cmd = 'curl %s' % md_url
- md_json = self.ssh_client.exec_command(cmd)
- self.verify_device_metadata(md_json)
- return True
-
- if not test_utils.call_until_true(get_and_verify_metadata,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- raise exceptions.TimeoutException('Timeout while verifying '
- 'metadata on server.')
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_device_metadata)
# Verify metadata on config drive
if CONF.compute_feature_enabled.config_drive:
- LOG.info('Attempting to verify tagged devices in server %s via '
- 'the config drive.', server['id'])
- self.ssh_client.mount_config_drive()
- cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
- md_json = self.ssh_client.exec_command(cmd_md)
- self.verify_device_metadata(md_json)
- self.ssh_client.unmount_config_drive()
+ self.verify_metadata_on_config_drive(server, ssh_client,
+ self.verify_device_metadata)
-class DeviceTaggingTestV2_42(DeviceTaggingTest):
+class TaggedBootDevicesTest_v242(TaggedBootDevicesTest):
min_microversion = '2.42'
max_microversion = 'latest'
+
+
+class TaggedAttachmentsTest(DeviceTaggingBase):
+
+ min_microversion = '2.49'
+ max_microversion = 'latest'
+
+ @classmethod
+ def skip_checks(cls):
+ super(TaggedAttachmentsTest, cls).skip_checks()
+ if not CONF.compute_feature_enabled.metadata_service:
+ raise cls.skipException('Metadata API must be enabled')
+
+ def verify_device_metadata(self, md_json):
+ md_dict = json.loads(md_json)
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
+ try:
+ self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ return True
+ except Exception:
+ return False
+
+ def verify_empty_devices(self, md_json):
+ md_dict = json.loads(md_json)
+ try:
+ self.assertEmpty(md_dict['devices'])
+ return True
+ except Exception:
+ return False
+
+ @decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
+ @utils.services('network', 'volume', 'image')
+ def test_tagged_attachment(self):
+ # Create network
+ net = self.networks_client.create_network(
+ name=data_utils.rand_name(
+ 'tagged-attachments-test-net'))['network']
+ self.addCleanup(self.networks_client.delete_network, net['id'])
+
+ # Create subnet
+ subnet = self.subnets_client.create_subnet(
+ network_id=net['id'],
+ cidr='10.10.10.0/24',
+ ip_version=4)['subnet']
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+
+ # Create volume
+ volume = self.create_volume()
+
+ # Boot test server
+ config_drive_enabled = CONF.compute_feature_enabled.config_drive
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+
+ server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ config_drive=config_drive_enabled,
+ name=data_utils.rand_name('device-tagging-server'),
+ networks=[{'uuid': self.get_tenant_network()['id']}])
+ self.addCleanup(self.delete_server, server['id'])
+
+ # Attach tagged nic and volume
+ interface = self.interfaces_client.create_interface(
+ server['id'], net_id=net['id'],
+ tag='nic-tag')['interfaceAttachment']
+ self.attach_volume(server, volume, tag='volume-tag')
+
+ ssh_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ CONF.validation.image_ssh_user,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_device_metadata)
+
+ # Detach tagged nic and volume
+ self.servers_client.detach_volume(server['id'], volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ self.interfaces_client.delete_interface(server['id'],
+ interface['port_id'])
+ waiters.wait_for_interface_detach(self.interfaces_client,
+ server['id'],
+ interface['port_id'])
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_empty_devices)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index bbec30c..9fc5af0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -393,7 +393,7 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup1).response
+ name=backup1)
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
@@ -409,10 +409,10 @@
"deleted during rotation.", oldest_backup)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image1_id = resp['image_id']
else:
- image1_id = data_utils.parse_image_id(resp['location'])
+ image1_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
@@ -422,12 +422,12 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup2).response
+ name=backup2)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image2_id = resp['image_id']
else:
- image2_id = data_utils.parse_image_id(resp['location'])
+ image2_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
@@ -465,12 +465,12 @@
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
- name=backup3).response
+ name=backup3)
if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", resp, "lt"):
+ "OpenStack-API-Version", "compute 2.45", resp.response, "lt"):
image3_id = resp['image_id']
else:
- image3_id = data_utils.parse_image_id(resp['location'])
+ image3_id = data_utils.parse_image_id(resp.response['location'])
self.addCleanup(glance_client.delete_image, image3_id)
# the first back up should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@@ -603,6 +603,20 @@
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
glance_client.wait_for_resource_deletion(images[0]['id'])
+ @decorators.idempotent_id('8cf9f450-a871-42cf-9bef-77eba189c0b0')
+ @decorators.related_bug('1745529')
+ @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
+ 'Shelve is not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
+ def test_shelve_paused_server(self):
+ server = self.create_test_server(wait_until='ACTIVE')
+ self.client.pause_server(server['id'])
+ waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
+ # Check if Shelve operation is successful on paused server.
+ compute.shelve_server(self.client, server['id'],
+ force_shelve_offload=True)
+
@decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
def test_stop_start_server(self):
self.client.stop_server(self.server_id)
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 543fa1c..56d973e 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -209,3 +209,34 @@
server['id'], 'ACTIVE')
# Checking list details API response schema
self.servers_client.list_servers(detail=True)
+
+
+class ServerShowV263Test(base.BaseV2ComputeTest):
+ min_microversion = '2.63'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
+ def test_show_update_rebuild_list_server(self):
+ trusted_certs = ['test-cert-1', 'test-cert-2']
+ server = self.create_test_server(
+ trusted_image_certificates=trusted_certs,
+ wait_until='ACTIVE')
+
+ # Check show API response schema
+ self.servers_client.show_server(server['id'])['server']
+
+ # Check update API response schema
+ self.servers_client.update_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # Check rebuild API response schema
+ self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # Check list details API response schema
+ params = {'trusted_image_certificates': trusted_certs}
+ servers = self.servers_client.list_servers(
+ detail=True, **params)['servers']
+ self.assertNotEmpty(servers)
diff --git a/tempest/api/compute/servers/test_servers_microversions.py b/tempest/api/compute/servers/test_servers_microversions.py
index f3863f1..2434884 100644
--- a/tempest/api/compute/servers/test_servers_microversions.py
+++ b/tempest/api/compute/servers/test_servers_microversions.py
@@ -49,3 +49,18 @@
key_name=keypair_name)
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
+
+
+class ServerShowV257Test(base.BaseV2ComputeTest):
+ min_microversion = '2.57'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('803df848-080a-4261-8f11-b020cd9b6f60')
+ def test_rebuild_server(self):
+ server = self.create_test_server(wait_until='ACTIVE')
+ user_data = "ZWNobyAiaGVsbG8gd29ybGQi"
+ # Checking rebuild API response schema
+ self.servers_client.rebuild_server(server['id'], self.image_ref_alt,
+ user_data=user_data)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 90f04ff..5fb1711 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -25,8 +25,12 @@
CONF = config.CONF
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# This test needs a network and a subnet
@@ -50,8 +54,6 @@
# for a given server_id
if CONF.service_available.neutron:
- # TODO(mriedem): After a microversion implements the API for
- # neutron, a 400 should be a failure for nova-network and neutron.
with testtools.ExpectedException(exceptions.BadRequest):
self.client.list_virtual_interfaces(self.server['id'])
else:
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index c4e2400..ec4d7a8 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -20,8 +20,12 @@
from tempest.lib import exceptions as lib_exc
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# For this test no network resources are needed
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 7cf90ae..a62492d 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -43,14 +43,19 @@
super(QuotasTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
cls.user_id = cls.client.user_id
- cls.default_quota_set = set(('injected_file_content_bytes',
- 'metadata_items', 'injected_files',
- 'ram', 'floating_ips',
- 'fixed_ips', 'key_pairs',
- 'injected_file_path_bytes',
- 'instances', 'security_group_rules',
- 'cores', 'security_groups',
+ cls.default_quota_set = set(('metadata_items', 'ram', 'key_pairs',
+ 'instances', 'cores',
'server_group_members', 'server_groups'))
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['fixed_ips', 'floating_ips',
+ 'security_group_rules',
+ 'security_groups'])
+ if cls.is_requested_microversion_compatible('2.56'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'injected_files'])
@decorators.idempotent_id('f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107')
def test_get_quotas(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 7a74869..8618148 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -56,3 +56,16 @@
self.assertRaises(lib_exc.BadRequest,
self.attach_volume, server, volume)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('ee37a796-2afb-11e7-bc0f-fa163e65f5ce')
+ def test_attach_attached_volume_to_different_server(self):
+ server1 = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ self.attach_volume(server1, volume)
+
+ # Create server2 and attach in-use volume
+ server2 = self.create_test_server(wait_until='ACTIVE')
+ self.assertRaises(lib_exc.BadRequest,
+ self.attach_volume, server2, volume)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 69cac33..62ced19 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -384,12 +384,23 @@
rules = self.roles_client.list_all_role_inference_rules()[
'role_inferences']
+
+ # NOTE(jaosorior): With the work related to the define-default-roles
+ # blueprint, we now have 'admin', 'member' and 'reader' by default. So
+        # we filter out every other implied role to only take into account
+        # the ones related to this test.
+ relevant_roles = (self.roles[0]['id'], self.roles[1]['id'],
+ self.roles[2]['id'], self.role['id'])
+
+ def is_implied_role_relevant(rule):
+ return any(r for r in rule['implies'] if r['id'] in relevant_roles)
+
+ relevant_rules = filter(is_implied_role_relevant, rules)
# Sort the rules by the number of inferences, since there should be 1
# inference between "roles[2]" and "role" and 2 inferences for
# "roles[0]": between "roles[1]" and "roles[2]".
- sorted_rules = sorted(rules, key=lambda r: len(r['implies']))
+ sorted_rules = sorted(relevant_rules, key=lambda r: len(r['implies']))
- # Check that 2 sets of rules are returned.
self.assertEqual(2, len(sorted_rules))
# Check that only 1 inference rule exists between "roles[2]" and "role"
self.assertEqual(1, len(sorted_rules[0]['implies']))
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index 5aa337c..be0c4c6 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -22,7 +22,6 @@
class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
- force_tenant_isolation = True
credentials = ['primary', 'alt', 'admin']
@classmethod
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 45f4caa..731a055 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -157,6 +157,57 @@
waiters.wait_for_volume_resource_status(
self.groups_client, grp2['id'], 'available')
+ @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
+ @decorators.related_bug('1739031')
+ def test_delete_group_snapshots_following_updated_volumes(self):
+ volume_type = self.create_volume_type()
+
+ group_type = self.create_group_type()
+
+ # Create a volume group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Note: When dealing with consistency groups all volumes must
+ # reside on the same backend. Adding volumes to the same consistency
+ # group from multiple backends isn't supported. In order to ensure all
+        # volumes share the same backend, all volumes must share the same
+        # volume-type and group id.
+ volume_list = []
+ for _ in range(2):
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+ volume_list.append(volume['id'])
+
+ for vol in volume_list:
+ self.groups_client.update_group(grp['id'],
+ remove_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ self.groups_client.update_group(grp['id'],
+ add_volumes=vol)
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp['id'], 'available')
+
+ # Verify the created volumes are associated with consistency group
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ grp_vols = [v for v in vols if v['group_id'] == grp['id']]
+ self.assertEqual(2, len(grp_vols))
+
+ # Create a snapshot group
+ group_snapshot = self._create_group_snapshot(group_id=grp['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+
+ for snap in snapshots:
+ if snap['volume_id'] in volume_list:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Delete a snapshot group
+ self._delete_group_snapshot(group_snapshot)
+
class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
_api_version = 3
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c0891e4..c5c70d2 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -29,6 +29,10 @@
if not CONF.volume_feature_enabled.multi_backend:
raise cls.skipException("Cinder multi-backend feature disabled")
+ if len(set(CONF.volume.backend_names)) < 2:
+ raise cls.skipException("Requires at least two different "
+ "backend names")
+
@classmethod
def resource_setup(cls):
super(VolumeMultiBackendTest, cls).resource_setup()
@@ -41,9 +45,6 @@
# Volume/Type creation (uses volume_backend_name)
# It is not allowed to create the same backend name twice
- if len(backend_names) < 2:
- raise cls.skipException("Requires at least two different "
- "backend names")
for backend_name in backend_names:
# Volume/Type creation (uses backend_name)
cls._create_type_and_volume(backend_name, False)
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 6f9daa8..e546bff 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -13,10 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.common import waiters
-from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
@@ -25,8 +23,6 @@
class BaseVolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
-
credentials = ['primary', 'alt', 'admin']
def setUp(self):
@@ -45,6 +41,19 @@
cls.transfer_client = cls.os_primary.volume_transfers_v2_client
cls.alt_transfer_client = cls.os_alt.volume_transfers_v2_client
+ @classmethod
+ def resource_setup(cls):
+ super(BaseVolumeQuotasAdminTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas so that some tests may use it
+ # to restore the quotas to their original values after they are
+ # done.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+
@decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0')
def test_list_quotas(self):
quotas = (self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
@@ -62,8 +71,6 @@
@decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6')
def test_update_all_quota_resources_for_tenant(self):
# Admin can update all the resource quota limits for a tenant
- default_quota_set = self.admin_quotas_client.show_default_quota_set(
- self.demo_tenant_id)['quota_set']
new_quota_set = {'gigabytes': 1009,
'volumes': 11,
'snapshots': 11,
@@ -76,11 +83,9 @@
self.demo_tenant_id,
**new_quota_set)['quota_set']
- cleanup_quota_set = dict(
- (k, v) for k, v in default_quota_set.items()
- if k in QUOTA_KEYS)
self.addCleanup(self.admin_quotas_client.update_quota_set,
- self.demo_tenant_id, **cleanup_quota_set)
+ self.demo_tenant_id, **self.cleanup_quota_set)
+
# test that the specific values we set are actually in
# the final result. There is nothing here that ensures there
# would be no other values in there.
@@ -96,6 +101,25 @@
for usage_key in QUOTA_USAGE_KEYS:
self.assertIn(usage_key, quota_usage[key])
+ @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
+ def test_delete_quota(self):
+ # Admin can delete the resource quota set for a project
+
+ self.addCleanup(self.admin_quotas_client.update_quota_set,
+ self.demo_tenant_id, **self.cleanup_quota_set)
+
+ quota_set_default = self.admin_quotas_client.show_default_quota_set(
+ self.demo_tenant_id)['quota_set']
+ volume_default = quota_set_default['volumes']
+
+ self.admin_quotas_client.update_quota_set(
+ self.demo_tenant_id, volumes=(volume_default + 5))
+
+ self.admin_quotas_client.delete_quota_set(self.demo_tenant_id)
+ quota_set_new = (self.admin_quotas_client.show_quota_set(
+ self.demo_tenant_id)['quota_set'])
+ self.assertEqual(volume_default, quota_set_new['volumes'])
+
@decorators.idempotent_id('ae8b6091-48ad-4bfa-a188-bbf5cc02115f')
def test_quota_usage(self):
quota_usage = self.admin_quotas_client.show_quota_set(
@@ -115,28 +139,6 @@
volume["size"],
new_quota_usage['gigabytes']['in_use'])
- @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
- def test_delete_quota(self):
- # Admin can delete the resource quota set for a project
- project_name = data_utils.rand_name('quota_tenant')
- description = data_utils.rand_name('desc_')
- project = identity.identity_utils(self.os_admin).create_project(
- project_name, description=description)
- project_id = project['id']
- self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
- project_id)
- quota_set_default = self.admin_quotas_client.show_default_quota_set(
- project_id)['quota_set']
- volume_default = quota_set_default['volumes']
-
- self.admin_quotas_client.update_quota_set(
- project_id, volumes=(volume_default + 5))
-
- self.admin_quotas_client.delete_quota_set(project_id)
- quota_set_new = (self.admin_quotas_client.show_quota_set(project_id)
- ['quota_set'])
- self.assertEqual(volume_default, quota_set_new['volumes'])
-
@decorators.idempotent_id('8911036f-9d54-4720-80cc-a1c9796a8805')
def test_quota_usage_after_volume_transfer(self):
# Create a volume for transfer
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index d127b5f..f50f336 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -19,10 +19,11 @@
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
class BaseVolumeQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
@classmethod
def setup_credentials(cls):
@@ -32,11 +33,23 @@
@classmethod
def resource_setup(cls):
super(BaseVolumeQuotasNegativeTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas, then set up the cleanup method
+ # to restore the quotas to their original values after the tests
+ # from this class are done. This is needed just in case Tempest is
+ # configured to use pre-provisioned projects/user accounts.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+ cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
+ cls.demo_tenant_id,
+ **cls.cleanup_quota_set)
+
cls.shared_quota_set = {'gigabytes': 2 * CONF.volume.volume_size,
'volumes': 1}
- # NOTE(gfidente): no need to restore original quota set
- # after the tests as they only work with dynamic credentials.
cls.admin_quotas_client.update_quota_set(
cls.demo_tenant_id,
**cls.shared_quota_set)
diff --git a/tempest/api/volume/admin/test_volume_services_negative.py b/tempest/api/volume/admin/test_volume_services_negative.py
new file mode 100644
index 0000000..6f3dbc6
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services_negative.py
@@ -0,0 +1,65 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(VolumeServicesNegativeTest, cls).resource_setup()
+ cls.services = cls.admin_volume_services_client.list_services()[
+ 'services']
+ cls.host = cls.services[0]['host']
+ cls.binary = cls.services[0]['binary']
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
+ def test_enable_service_with_invalid_host(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.enable_service,
+ host='invalid_host', binary=self.binary)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
+ def test_disable_service_with_invalid_binary(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_volume_services_client.disable_service,
+ host=self.host, binary='invalid_binary')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
+ def test_disable_log_reason_with_no_reason(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.disable_log_reason,
+ host=self.host, binary=self.binary,
+ disabled_reason=None)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
+ def test_freeze_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.freeze_host,
+ host='invalid_host')
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
+ def test_thaw_host_with_invalid_host(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.admin_volume_services_client.thaw_host,
+ host='invalid_host')
diff --git a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
index 0f4e90f..74eb792 100644
--- a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
@@ -19,10 +19,11 @@
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
+QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
+ 'backup_gigabytes', 'per_volume_gigabytes']
class VolumeSnapshotQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
- force_tenant_isolation = True
@classmethod
def skip_checks(cls):
@@ -38,12 +39,24 @@
@classmethod
def resource_setup(cls):
super(VolumeSnapshotQuotasNegativeTestJSON, cls).resource_setup()
+
+ # Save the current set of quotas, then set up the cleanup method
+ # to restore the quotas to their original values after the tests
+ # from this class are done. This is needed just in case Tempest is
+ # configured to use pre-provisioned projects/user accounts.
+ cls.original_quota_set = (cls.admin_quotas_client.show_quota_set(
+ cls.demo_tenant_id)['quota_set'])
+ cls.cleanup_quota_set = dict(
+ (k, v) for k, v in cls.original_quota_set.items()
+ if k in QUOTA_KEYS)
+ cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
+ cls.demo_tenant_id,
+ **cls.cleanup_quota_set)
+
cls.default_volume_size = CONF.volume.volume_size
cls.shared_quota_set = {'gigabytes': 3 * cls.default_volume_size,
'volumes': 1, 'snapshots': 1}
- # NOTE(gfidente): no need to restore original quota set
- # after the tests as they only work with tenant isolation.
cls.admin_quotas_client.update_quota_set(
cls.demo_tenant_id,
**cls.shared_quota_set)
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 375aacb..45060d0 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -59,9 +59,9 @@
volume = self.create_volume()
# Create backup
backup_name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
- backup = (self.create_backup(backup_client=self.admin_backups_client,
- volume_id=volume['id'],
- name=backup_name))
+ backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Export Backup
@@ -103,21 +103,22 @@
self.assertIn(new_id, [b['id'] for b in backups])
# Restore backup
- restore = self.admin_backups_client.restore_backup(
- backup['id'])['restore']
- self.addCleanup(self.admin_volume_client.delete_volume,
+ restore = self.backups_client.restore_backup(backup['id'])['restore']
+ self.addCleanup(self.volumes_client.delete_volume,
restore['volume_id'])
self.assertEqual(backup['id'], restore['backup_id'])
- waiters.wait_for_volume_resource_status(self.admin_volume_client,
- restore['volume_id'],
- 'available')
+
+ # When the restore operation is performed, backup['id'] goes to the
+ # 'restoring' state, so we need to wait for backup['id'] to become
+ # 'available'.
+ waiters.wait_for_volume_resource_status(
+ self.backups_client, backup['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, restore['volume_id'], 'available')
# Verify if restored volume is there in volume list
- volumes = self.admin_volume_client.list_volumes()['volumes']
+ volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
- waiters.wait_for_volume_resource_status(self.admin_backups_client,
- import_backup['id'],
- 'available')
@decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
def test_volume_backup_reset_status(self):
@@ -126,12 +127,12 @@
# Create a backup
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
- backup = self.create_backup(backup_client=self.admin_backups_client,
- volume_id=volume['id'],
- name=backup_name)
+ backup = self.create_backup(volume_id=volume['id'], name=backup_name)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.assertEqual(backup_name, backup['name'])
# Reset backup status to error
self.admin_backups_client.reset_backup_status(backup_id=backup['id'],
status="error")
- waiters.wait_for_volume_resource_status(self.admin_backups_client,
+ waiters.wait_for_volume_resource_status(self.backups_client,
backup['id'], 'error')
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index 4018468..00a3375 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -17,7 +17,6 @@
from tempest import config
from tempest.lib import decorators
-
CONF = config.CONF
@@ -32,9 +31,16 @@
@classmethod
def resource_setup(cls):
super(AbsoluteLimitsTests, cls).resource_setup()
+
# Create a shared volume for tests
cls.volume = cls.create_volume()
+ @classmethod
+ def skip_checks(cls):
+ super(AbsoluteLimitsTests, cls).skip_checks()
+ if not CONF.auth.use_dynamic_credentials:
+ raise cls.skipException("Must use dynamic credentials.")
+
@decorators.idempotent_id('8e943f53-e9d6-4272-b2e9-adcf2f7c29ad')
def test_get_volume_absolute_limits(self):
# get volume limit for a tenant
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4108da5..75e81b7 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import waiters
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -43,6 +44,9 @@
transfer = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']
transfer_id = transfer['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
auth_key = transfer['auth_key']
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
@@ -81,6 +85,9 @@
# Create a volume transfer
transfer_id = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 07cfad5..c178272 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -117,6 +117,8 @@
self.__class__.__name__ + '-Backup')
backup = self.create_backup(volume_id=volume['id'],
name=backup_name, force=True)
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
self.assertEqual(backup_name, backup['name'])
@decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
@@ -132,6 +134,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
@@ -160,6 +164,8 @@
# Create volume and backup
volume = self.create_volume()
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Update backup and assert response body for update_backup method
update_kwargs = {
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 52114bc..93638b8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -163,6 +164,8 @@
backup = self.create_backup(volume_id=self.volume_origin['id'],
snapshot_id=snapshot['id'])
+ waiters.wait_for_volume_resource_status(self.snapshots_client,
+ snapshot['id'], 'available')
backup_info = self.backups_client.show_backup(backup['id'])['backup']
self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
diff --git a/tempest/clients.py b/tempest/clients.py
index 707127c..2a07be9 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -241,32 +241,32 @@
# if only api_v3 is enabled, all these clients should be available
if (CONF.volume_feature_enabled.api_v2 or
CONF.volume_feature_enabled.api_v3):
- self.backups_v2_client = self.volume_v2.BackupsClient()
+ self.backups_v2_client = self.volume_v3.BackupsClient()
self.encryption_types_v2_client = \
- self.volume_v2.EncryptionTypesClient()
+ self.volume_v3.EncryptionTypesClient()
self.snapshot_manage_v2_client = \
- self.volume_v2.SnapshotManageClient()
- self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
+ self.volume_v3.SnapshotManageClient()
+ self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
self.volume_capabilities_v2_client = \
- self.volume_v2.CapabilitiesClient()
- self.volume_manage_v2_client = self.volume_v2.VolumeManageClient()
- self.volume_qos_v2_client = self.volume_v2.QosSpecsClient()
- self.volume_services_v2_client = self.volume_v2.ServicesClient()
- self.volume_types_v2_client = self.volume_v2.TypesClient()
- self.volume_hosts_v2_client = self.volume_v2.HostsClient()
- self.volume_quotas_v2_client = self.volume_v2.QuotasClient()
+ self.volume_v3.CapabilitiesClient()
+ self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
+ self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
+ self.volume_services_v2_client = self.volume_v3.ServicesClient()
+ self.volume_types_v2_client = self.volume_v3.TypesClient()
+ self.volume_hosts_v2_client = self.volume_v3.HostsClient()
+ self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
self.volume_quota_classes_v2_client = \
- self.volume_v2.QuotaClassesClient()
+ self.volume_v3.QuotaClassesClient()
self.volume_scheduler_stats_v2_client = \
- self.volume_v2.SchedulerStatsClient()
+ self.volume_v3.SchedulerStatsClient()
self.volume_transfers_v2_client = \
- self.volume_v2.TransfersClient()
+ self.volume_v3.TransfersClient()
self.volume_v2_availability_zone_client = \
- self.volume_v2.AvailabilityZoneClient()
- self.volume_v2_limits_client = self.volume_v2.LimitsClient()
- self.volumes_v2_client = self.volume_v2.VolumesClient()
+ self.volume_v3.AvailabilityZoneClient()
+ self.volume_v2_limits_client = self.volume_v3.LimitsClient()
+ self.volumes_v2_client = self.volume_v3.VolumesClient()
self.volumes_v2_extension_client = \
- self.volume_v2.ExtensionsClient()
+ self.volume_v3.ExtensionsClient()
# Set default client for users that don't need explicit version
self.volumes_client_latest = self.volumes_v2_client
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 72ee715..a27425c 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -244,7 +244,7 @@
'each newline')
parser.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
- 'this file contains a seperate test '
+ 'this file contains a separate test '
'on each newline. This command '
'supports files created by the tempest '
'run ``--list-tests`` command')
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 08e2a12..0e86f05 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -287,3 +287,24 @@
raise lib_exc.TimeoutException(message)
return body
+
+
+def wait_for_interface_detach(client, server_id, port_id):
+ """Waits for an interface to be detached from a server."""
+ body = client.list_interfaces(server_id)['interfaceAttachments']
+ ports = [iface['port_id'] for iface in body]
+ start = int(time.time())
+
+ while port_id in ports:
+ time.sleep(client.build_interval)
+ body = client.list_interfaces(server_id)['interfaceAttachments']
+ ports = [iface['port_id'] for iface in body]
+ if port_id not in ports:
+ return body
+
+ timed_out = int(time.time()) - start >= client.build_timeout
+ if timed_out:
+ message = ('Interface %s failed to detach from server %s within '
+ 'the required time (%s s)' % (port_id, server_id,
+ client.build_timeout))
+ raise lib_exc.TimeoutException(message)
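+
+
+# Example usage (illustrative; the client fixture and the server/port IDs
+# are assumptions, not part of this patch):
+#
+#     body = wait_for_interface_detach(
+#         self.interfaces_client, server['id'], port['id'])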
diff --git a/tempest/lib/api_schema/response/compute/v2_57/__init__.py b/tempest/lib/api_schema/response/compute/v2_57/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_57/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
new file mode 100644
index 0000000..ed1ca7d
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_54 import servers as servers254
+# ****** Schemas changed in microversion 2.57 *****************
+
+# NOTE(gmann): This is the schema for microversion 2.57, which includes
+# 'user_data' in the response body of the following API:
+# - ``POST /servers/{server_id}/action (rebuild)``
+
+user_data = {
+ 'oneOf': [
+ {
+ 'type': 'string',
+ 'format': 'base64',
+ 'maxLength': 65535
+ },
+ {'type': 'null'}
+ ]
+}
+
+rebuild_server = copy.deepcopy(servers254.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'user_data': user_data})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('user_data')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers254.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'user_data': user_data})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('user_data')
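+
+# Example of a 'user_data' value accepted by this schema (illustrative):
+# either None or a base64 string such as 'IyEvYmluL2Jhc2g=' (base64 for
+# '#!/bin/bash').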
+
+# ****** Schemas unchanged in microversion 2.57 since microversion 2.54 ***
+
+# NOTE(gmann): Below are the schemas that are unchanged in this microversion.
+# We need to keep these schemas in this file to have a generic way to select
+# the right schema based on the self.schema_versions_info mapping in the
+# service client.
+get_server = copy.deepcopy(servers254.get_server)
+list_servers_detail = copy.deepcopy(servers254.list_servers_detail)
+update_server = copy.deepcopy(servers254.update_server)
diff --git a/tempest/lib/api_schema/response/compute/v2_63/__init__.py b/tempest/lib/api_schema/response/compute/v2_63/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_63/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
new file mode 100644
index 0000000..5cdaf54
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -0,0 +1,65 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_26 import servers as servers226
+from tempest.lib.api_schema.response.compute.v2_54 import servers as servers254
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
+# Nova microversion 2.63 adds 'trusted_image_certificates' (a list of
+# certificate IDs) to the server rebuild and servers details responses.
+
+
+trusted_certs = {
+ 'type': ['array', 'null'],
+ 'minItems': 1,
+ 'maxItems': 50,
+ 'uniqueItems': True,
+ 'items': {
+ 'type': 'string',
+ 'minLength': 1
+ }
+}
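+
+# Example of a value matching trusted_certs (illustrative):
+#     ['cert-id-1', 'cert-id-2'], or None when certificates are not used
+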
+# The list response schema wasn't changed in v2.63, so reuse the v2.26 schema.
+list_servers = copy.deepcopy(servers226.list_servers)
+
+list_servers_detail = copy.deepcopy(servers254.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('trusted_image_certificates')
+
+rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+rebuild_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers257.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+update_server = copy.deepcopy(servers254.update_server)
+update_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+update_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
+
+get_server = copy.deepcopy(servers254.get_server)
+get_server['response_body']['properties']['server'][
+ 'properties'].update({'trusted_image_certificates': trusted_certs})
+get_server['response_body']['properties']['server'][
+ 'required'].append('trusted_image_certificates')
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index bbdf382..9a35b76 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -12,9 +12,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import base64
import jsonschema
from oslo_utils import timeutils
+import six
# JSON Schema validator and format checker used for JSON Schema validation
JSONSCHEMA_VALIDATOR = jsonschema.Draft4Validator
@@ -37,3 +39,19 @@
return False
else:
return True
+
+
+@jsonschema.FormatChecker.cls_checks('base64')
+def _validate_base64_format(instance):
+ try:
+ if isinstance(instance, six.text_type):
+ instance = instance.encode('utf-8')
+ base64.decodestring(instance)
+ except base64.binascii.Error:
+ return False
+ except TypeError:
+ # The input must be a string. If instance isn't a string type, a
+ # TypeError will be raised here.
+ return False
+
+ return True
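+
+
+# Example behavior (illustrative): _validate_base64_format('dGVtcGVzdA==')
+# returns True, while _validate_base64_format('x') returns False because
+# 'x' cannot be decoded as valid base64 (incorrect padding).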
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
new file mode 100644
index 0000000..510fc36
--- /dev/null
+++ b/tempest/lib/common/thread.py
@@ -0,0 +1,29 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This disables relative module imports
+from __future__ import absolute_import
+
+
+import six
+
+if six.PY2:
+ # the thread module was removed in Python 3
+ from thread import get_ident # noqa: H237,F401
+
+else:
+ # On Python 3 the thread module has been deprecated and get_ident has
+ # been moved to the threading module
+ from threading import get_ident # noqa: F401
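+
+
+# Example usage (illustrative): get_ident() returns an integer that uniquely
+# identifies the calling thread for as long as the thread is alive, e.g.:
+#
+#     current_thread_id = get_ident()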
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index e75cdb5..0314356 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -30,7 +30,9 @@
from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
+from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
+from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -49,7 +51,9 @@
{'min': '2.26', 'max': '2.46', 'schema': schemav226},
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
{'min': '2.48', 'max': '2.53', 'schema': schemav248},
- {'min': '2.54', 'max': None, 'schema': schemav254}]
+ {'min': '2.54', 'max': '2.56', 'schema': schemav254},
+ {'min': '2.57', 'max': '2.62', 'schema': schemav257},
+ {'min': '2.63', 'max': None, 'schema': schemav263}]
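+ # For example (illustrative): a request made with microversion 2.60 is
+ # validated against schemav257, since 2.57 <= 2.60 <= 2.62.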
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
diff --git a/tempest/lib/services/network/agents_client.py b/tempest/lib/services/network/agents_client.py
index 9bdf090..a0f832e 100644
--- a/tempest/lib/services/network/agents_client.py
+++ b/tempest/lib/services/network/agents_client.py
@@ -18,35 +18,62 @@
class AgentsClient(base.BaseNetworkClient):
def update_agent(self, agent_id, **kwargs):
- """Update agent."""
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526673
+ """Update an agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#update-agent
+ """
uri = '/agents/%s' % agent_id
return self.update_resource(uri, kwargs)
def show_agent(self, agent_id, **fields):
+ """Show details for an agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#show-agent-details
+ """
uri = '/agents/%s' % agent_id
return self.show_resource(uri, **fields)
def list_agents(self, **filters):
+ """List all agents.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#list-all-agents
+ """
uri = '/agents'
return self.list_resources(uri, **filters)
def list_routers_on_l3_agent(self, agent_id):
+ """List routers that an l3 agent hosts.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#list-routers-hosted-by-an-l3-agent
+ """
uri = '/agents/%s/l3-routers' % agent_id
return self.list_resources(uri)
def create_router_on_l3_agent(self, agent_id, **kwargs):
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526670
+ """Add a router to an l3 agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#schedule-router-to-an-l3-agent
+ """
uri = '/agents/%s/l3-routers' % agent_id
return self.create_resource(uri, kwargs, expect_empty_body=True)
def delete_router_from_l3_agent(self, agent_id, router_id):
+ """Remove a router to an l3 agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/#remove-l3-router-from-an-l3-agent
+ """
uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
return self.delete_resource(uri)
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
index 0fac6bd..1fde79f 100644
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ b/tempest/lib/services/volume/v1/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
@@ -61,7 +61,7 @@
return rest_client.ResponseBody(resp, body)
def delete_encryption_type(self, volume_type_id):
- """Delete the encryption type for the specified volume-type."""
+ """Delete the encryption type for the specified volume type."""
resp, body = self.delete(
"/types/%s/encryption/provider" % volume_type_id)
self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v3/encryption_types_client.py b/tempest/lib/services/volume/v3/encryption_types_client.py
index 7443a87..03de187 100644
--- a/tempest/lib/services/volume/v3/encryption_types_client.py
+++ b/tempest/lib/services/volume/v3/encryption_types_client.py
@@ -38,7 +38,7 @@
def show_encryption_type(self, volume_type_id):
"""Get the volume encryption type for the specified volume type.
- volume_type_id: Id of volume_type.
+ :param volume_type_id: Id of volume type.
"""
url = "/types/%s/encryption" % volume_type_id
resp, body = self.get(url)
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 09036a4..22155a9 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -20,9 +20,15 @@
class ServicesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Services API requests"""
def list_services(self, **params):
+ """List all Cinder services.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#list-all-cinder-services
+ """
url = 'os-services'
if params:
url += '?%s' % urllib.urlencode(params)
@@ -31,3 +37,66 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def enable_service(self, **kwargs):
+ """Enable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#enable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/enable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_service(self, **kwargs):
+ """Disable service on a host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#disable-a-cinder-service
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def disable_log_reason(self, **kwargs):
+ """Disable scheduling for a volume service and log disabled reason.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#log-disabled-cinder-service-information
+ """
+ put_body = json.dumps(kwargs)
+ resp, body = self.put('os-services/disable-log-reason', put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def freeze_host(self, **kwargs):
+ """Freeze a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#freeze-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/freeze', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
+
+ def thaw_host(self, **kwargs):
+ """Thaw a Cinder backend host.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#thaw-a-cinder-backend-host
+ """
+ put_body = json.dumps(kwargs)
+ resp, _ = self.put('os-services/thaw', put_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp)
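+
+# Example usage (illustrative; the client instance and the host/binary
+# values are assumptions, not part of this patch):
+#
+#     client.disable_service(host='fake-host@lvm', binary='cinder-volume')
+#     client.enable_service(host='fake-host@lvm', binary='cinder-volume')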
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index 298925a..f79bcd8 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -176,11 +176,12 @@
return rest_client.ResponseBody(resp, body)
def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
- """Update metadata item for the snapshot."""
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
+ """Update metadata for the snapshot for a specific key.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-a-snapshot-s-metadata-for-a-specific-key
+ """
put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
resp, body = self.put(url, put_body)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 6d9d03a..13ecd15 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -21,7 +21,7 @@
class TypesClient(rest_client.RestClient):
- """Client class to send CRUD Volume API requests"""
+ """Client class to send CRUD Volume Types API requests"""
def is_resource_deleted(self, id):
try:
@@ -36,7 +36,7 @@
return 'volume-type'
def list_volume_types(self, **params):
- """List all the volume_types created.
+ """List all the volume types created.
For a full list of available parameters, please refer to the official
API reference:
@@ -52,7 +52,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type(self, volume_type_id):
- """Returns the details of a single volume_type.
+ """Returns the details of a single volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -78,7 +78,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type(self, volume_type_id):
- """Deletes the Specified Volume_type.
+ """Deletes the specified volume type.
For a full list of available parameters, please refer to the official
API reference:
@@ -89,11 +89,11 @@
return rest_client.ResponseBody(resp, body)
def list_volume_types_extra_specs(self, volume_type_id, **params):
- """List all the volume_types extra specs created.
+ """List all the volume type extra specs created.
- TODO: Current api-site doesn't contain this API description.
- After fixing the api-site, we need to fix here also for putting
- the link to api-site.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#show-all-extra-specifications-for-volume-type
"""
url = 'types/%s/extra_specs' % volume_type_id
if params:
@@ -105,7 +105,7 @@
return rest_client.ResponseBody(resp, body)
def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
- """Returns the details of a single volume_type extra spec."""
+ """Returns the details of a single volume type extra spec."""
url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
resp, body = self.get(url)
body = json.loads(body)
@@ -113,10 +113,10 @@
return rest_client.ResponseBody(resp, body)
def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
- """Creates a new Volume_type extra spec.
+ """Creates new volume type extra specs.
- volume_type_id: Id of volume_type.
- extra_specs: A dictionary of values to be used as extra_specs.
+ :param volume_type_id: Id of volume type.
+ :param extra_specs: A dictionary of values to be used as extra_specs.
"""
url = "types/%s/extra_specs" % volume_type_id
post_body = json.dumps({'extra_specs': extra_specs})
@@ -126,7 +126,7 @@
return rest_client.ResponseBody(resp, body)
def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
- """Deletes the Specified Volume_type extra spec."""
+ """Deletes the specified volume type extra spec."""
resp, body = self.delete("types/%s/extra_specs/%s" % (
volume_type_id, extra_spec_name))
self.expected_success(202, resp.status)
@@ -149,10 +149,10 @@
extra_specs):
"""Update a volume_type extra spec.
- volume_type_id: Id of volume_type.
- extra_spec_name: Name of the extra spec to be updated.
- extra_spec: A dictionary of with key as extra_spec_name and the
- updated value.
+ :param volume_type_id: Id of volume type.
+ :param extra_spec_name: Name of the extra spec to be updated.
+ :param extra_specs: A dictionary with extra_spec_name as the key and
+ the updated value.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index c23b564..8a8c54e 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
@@ -56,6 +57,8 @@
# Create a backup
backup = self.create_backup(volume_id=volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Restore the backup
restored_volume_id = self.restore_backup(backup['id'])['volume_id']
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 2d024e9..1564f25 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -68,6 +68,9 @@
waiters.wait_for_server_termination(self.servers_client, server['id'])
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
+ # NOTE: This test is skipped unless 'public_network_id' is set. The
+ # option is used by the create_floating_ip() method, which is called
+ # from the get_server_ip() method.
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index bc197b5..938d226 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -72,3 +72,79 @@
mock_show.assert_has_calls([mock.call(volume_id),
mock.call(volume_id)])
mock_sleep.assert_called_once_with(1)
+
+
+class TestInterfaceWaiters(base.TestCase):
+
+ build_timeout = 1.
+ build_interval = 1
+ port_down = {'interfaceAttachment': {'port_state': 'DOWN'}}
+ port_active = {'interfaceAttachment': {'port_state': 'ACTIVE'}}
+
+ def mock_client(self, **kwargs):
+ return mock.MagicMock(
+ build_timeout=self.build_timeout,
+ build_interval=self.build_interval,
+ **kwargs)
+
+ def test_wait_for_interface_status(self):
+ show_interface = mock.Mock(
+ side_effect=[self.port_down, self.port_active])
+ client = self.mock_client(show_interface=show_interface)
+ self.patch('time.time', return_value=0.)
+ sleep = self.patch('time.sleep')
+
+ result = waiters.wait_for_interface_status(
+ client, 'server_id', 'port_id', 'ACTIVE')
+
+ self.assertIs(self.port_active['interfaceAttachment'], result)
+ show_interface.assert_has_calls([mock.call('server_id', 'port_id'),
+ mock.call('server_id', 'port_id')])
+ sleep.assert_called_once_with(client.build_interval)
+
+ def test_wait_for_interface_status_timeout(self):
+ show_interface = mock.MagicMock(return_value=self.port_down)
+ client = self.mock_client(show_interface=show_interface)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ sleep = self.patch('time.sleep')
+
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_interface_status,
+ client, 'server_id', 'port_id', 'ACTIVE')
+
+ show_interface.assert_has_calls([mock.call('server_id', 'port_id'),
+ mock.call('server_id', 'port_id')])
+ sleep.assert_called_once_with(client.build_interval)
+
+ one_interface = {'interfaceAttachments': [{'port_id': 'port_one'}]}
+ two_interfaces = {'interfaceAttachments': [{'port_id': 'port_one'},
+ {'port_id': 'port_two'}]}
+
+ def test_wait_for_interface_detach(self):
+ list_interfaces = mock.MagicMock(
+ side_effect=[self.two_interfaces, self.one_interface])
+ client = self.mock_client(list_interfaces=list_interfaces)
+ self.patch('time.time', return_value=0.)
+ sleep = self.patch('time.sleep')
+
+ result = waiters.wait_for_interface_detach(
+ client, 'server_id', 'port_two')
+
+ self.assertIs(self.one_interface['interfaceAttachments'], result)
+ list_interfaces.assert_has_calls([mock.call('server_id'),
+ mock.call('server_id')])
+ sleep.assert_called_once_with(client.build_interval)
+
+ def test_wait_for_interface_detach_timeout(self):
+ list_interfaces = mock.MagicMock(return_value=self.one_interface)
+ client = self.mock_client(list_interfaces=list_interfaces)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ sleep = self.patch('time.sleep')
+
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_interface_detach,
+ client, 'server_id', 'port_one')
+
+ list_interfaces.assert_has_calls([mock.call('server_id'),
+ mock.call('server_id')])
+ sleep.assert_called_once_with(client.build_interval)
diff --git a/tempest/tests/lib/common/test_http.py b/tempest/tests/lib/common/test_http.py
index a292209..02436e0 100644
--- a/tempest/tests/lib/common/test_http.py
+++ b/tempest/tests/lib/common/test_http.py
@@ -12,57 +12,158 @@
# License for the specific language governing permissions and limitations
# under the License.
+import urllib3
+
from tempest.lib.common import http
from tempest.tests import base
+CERT_NONE = 'CERT_NONE'
+CERT_REQUIRED = 'CERT_REQUIRED'
+CERT_LOCATION = '/etc/ssl/certs/ca-certificates.crt'
+PROXY_URL = 'http://myproxy:3128'
+REQUEST_URL = 'http://10.0.0.107:5000/v2.0'
+REQUEST_METHOD = 'GET'
+
+
class TestClosingHttp(base.TestCase):
- def setUp(self):
- super(TestClosingHttp, self).setUp()
- self.cert_none = "CERT_NONE"
- self.cert_location = "/etc/ssl/certs/ca-certificates.crt"
- def test_constructor_invalid_ca_certs_and_timeout(self):
- connection = http.ClosingHttp(
- disable_ssl_certificate_validation=False,
- ca_certs=None,
- timeout=None)
- for attr in ('cert_reqs', 'ca_certs', 'timeout'):
- self.assertNotIn(attr, connection.connection_pool_kw)
+ def closing_http(self, **kwargs):
+ return http.ClosingHttp(**kwargs)
- def test_constructor_valid_ca_certs(self):
- cert_required = 'CERT_REQUIRED'
- connection = http.ClosingHttp(
- disable_ssl_certificate_validation=False,
- ca_certs=self.cert_location,
- timeout=None)
- self.assertEqual(cert_required,
+ def test_closing_http(self):
+ connection = self.closing_http()
+
+ self.assertNotIn('cert_reqs', connection.connection_pool_kw)
+ self.assertNotIn('ca_certs', connection.connection_pool_kw)
+ self.assertNotIn('timeout', connection.connection_pool_kw)
+
+ def test_closing_http_with_ca_certs(self):
+ connection = self.closing_http(ca_certs=CERT_LOCATION)
+
+ self.assertEqual(CERT_REQUIRED,
connection.connection_pool_kw['cert_reqs'])
- self.assertEqual(self.cert_location,
+ self.assertEqual(CERT_LOCATION,
connection.connection_pool_kw['ca_certs'])
- self.assertNotIn('timeout',
+
+ def test_closing_http_with_dscv(self):
+ connection = self.closing_http(
+ disable_ssl_certificate_validation=True)
+
+ self.assertEqual(CERT_NONE,
+ connection.connection_pool_kw['cert_reqs'])
+ self.assertNotIn('ca_certs',
connection.connection_pool_kw)
- def test_constructor_ssl_cert_validation_disabled(self):
- connection = http.ClosingHttp(
+ def test_closing_http_with_ca_certs_and_dscv(self):
+ connection = self.closing_http(
disable_ssl_certificate_validation=True,
- ca_certs=None,
- timeout=30)
- self.assertEqual(self.cert_none,
+ ca_certs=CERT_LOCATION)
+
+ self.assertEqual(CERT_NONE,
connection.connection_pool_kw['cert_reqs'])
- self.assertEqual(30,
+ self.assertNotIn('ca_certs',
+ connection.connection_pool_kw)
+
+ def test_closing_http_with_timeout(self):
+ timeout = 30
+ connection = self.closing_http(timeout=timeout)
+ self.assertEqual(timeout,
connection.connection_pool_kw['timeout'])
- self.assertNotIn('ca_certs',
- connection.connection_pool_kw)
- def test_constructor_ssl_cert_validation_disabled_and_ca_certs(self):
- connection = http.ClosingHttp(
- disable_ssl_certificate_validation=True,
- ca_certs=self.cert_location,
- timeout=None)
- self.assertNotIn('timeout',
- connection.connection_pool_kw)
- self.assertEqual(self.cert_none,
- connection.connection_pool_kw['cert_reqs'])
- self.assertNotIn('ca_certs',
- connection.connection_pool_kw)
+ def test_request(self):
+ # Given
+ connection = self.closing_http()
+ http_response = urllib3.HTTPResponse()
+ request = self.patch('urllib3.PoolManager.request',
+ return_value=http_response)
+ retry = self.patch('urllib3.util.Retry')
+
+ # When
+ response, data = connection.request(
+ method=REQUEST_METHOD,
+ url=REQUEST_URL)
+
+ # Then
+ request.assert_called_once_with(
+ REQUEST_METHOD,
+ REQUEST_URL,
+ headers={'connection': 'close'},
+ retries=retry(raise_on_redirect=False, redirect=5))
+ self.assertEqual(
+ {'content-location': REQUEST_URL,
+ 'status': str(http_response.status)},
+ response)
+ self.assertEqual(http_response.status, response.status)
+ self.assertEqual(http_response.reason, response.reason)
+ self.assertEqual(http_response.version, response.version)
+ self.assertEqual(http_response.data, data)
+
+ def test_request_with_fields(self):
+ # Given
+ connection = self.closing_http()
+ http_response = urllib3.HTTPResponse()
+ request = self.patch('urllib3.PoolManager.request',
+ return_value=http_response)
+ retry = self.patch('urllib3.util.Retry')
+ fields = object()
+
+ # When
+ connection.request(
+ method=REQUEST_METHOD,
+ url=REQUEST_URL,
+ fields=fields)
+
+ # Then
+ request.assert_called_once_with(
+ REQUEST_METHOD,
+ REQUEST_URL,
+ fields=fields,
+ headers=dict(connection='close'),
+ retries=retry(raise_on_redirect=False, redirect=5))
+
+ def test_request_with_headers(self):
+ # Given
+ connection = self.closing_http()
+ headers = {'Xtra Key': 'Xtra Value'}
+ http_response = urllib3.HTTPResponse(headers=headers)
+ request = self.patch('urllib3.PoolManager.request',
+ return_value=http_response)
+ retry = self.patch('urllib3.util.Retry')
+
+ # When
+ response, _ = connection.request(
+ method=REQUEST_METHOD,
+ url=REQUEST_URL,
+ headers=headers)
+
+ # Then
+ request.assert_called_once_with(
+ REQUEST_METHOD,
+ REQUEST_URL,
+ headers=dict(headers, connection='close'),
+ retries=retry(raise_on_redirect=False, redirect=5))
+ self.assertEqual(
+ {'content-location': REQUEST_URL,
+ 'status': str(http_response.status),
+ 'xtra key': 'Xtra Value'},
+ response)
+
+
+class TestClosingProxyHttp(TestClosingHttp):
+
+ def closing_http(self, proxy_url=PROXY_URL, **kwargs):
+ connection = http.ClosingProxyHttp(proxy_url=proxy_url, **kwargs)
+ self.assertHasProxy(connection, proxy_url)
+ return connection
+
+ def test_class_without_proxy_url(self):
+ self.assertRaises(ValueError, http.ClosingProxyHttp, None)
+
+ def assertHasProxy(self, connection, proxy_url):
+ self.assertIsInstance(connection, http.ClosingProxyHttp)
+ proxy = connection.proxy
+ self.assertEqual(proxy_url,
+ '%s://%s:%i' % (proxy.scheme,
+ proxy.host,
+ proxy.port))
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index f638ba6..865767b 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -12,12 +12,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+import time
+
import mock
+from tempest.lib.common import thread
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
from tempest.tests import base
-from tempest.tests import utils
class TestTestUtils(base.TestCase):
@@ -78,47 +81,126 @@
42, test_utils.call_and_ignore_notfound_exc(m, *args, **kwargs))
m.assert_called_once_with(*args, **kwargs)
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_never_returns_true(self, m_time, m_sleep):
- def set_value(bool_value):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.side_effect = utils.generate_timeout_series(timeout)
- self.assertEqual(
- False, test_utils.call_until_true(set_value, timeout, sleep, False)
- )
- m_sleep.call_args_list = [mock.call(sleep)] * 2
- m_time.call_args_list = [mock.call()] * 2
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_returns_true(self, m_time, m_sleep):
- def set_value(bool_value=False):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.return_value = 0
- self.assertEqual(
- True, test_utils.call_until_true(set_value, timeout, sleep,
- bool_value=True)
- )
- self.assertEqual(0, m_sleep.call_count)
- # when logging cost time we need to acquire current time.
- self.assertEqual(2, m_time.call_count)
+class TestCallUntilTrue(base.TestCase):
- @mock.patch('time.sleep')
- @mock.patch('time.time')
- def test_call_until_true_when_f_returns_true_no_param(
- self, m_time, m_sleep):
- def set_value(bool_value=False):
- return bool_value
- timeout = 42 # The value doesn't matter as we mock time.time()
- sleep = 60 # The value doesn't matter as we mock time.sleep()
- m_time.side_effect = utils.generate_timeout_series(timeout)
- self.assertEqual(
- False, test_utils.call_until_true(set_value, timeout, sleep)
- )
- m_sleep.call_args_list = [mock.call(sleep)] * 2
- m_time.call_args_list = [mock.call()] * 2
+ def test_call_until_true_when_true_at_first_call(self):
+ """func returns True at first call
+
+ """
+ self._test_call_until_true(return_values=[True],
+ duration=30.,
+ time_sequence=[10., 60.])
+
+ def test_call_until_true_when_true_before_timeout(self):
+ """func returns false at first call, then True before timeout
+
+ """
+ self._test_call_until_true(return_values=[False, True],
+ duration=30.,
+ time_sequence=[10., 39., 41.])
+
+ def test_call_until_true_when_never_true_before_timeout(self):
+ """func returns false, then false, just before timeout
+
+ """
+ self._test_call_until_true(return_values=[False, False],
+ duration=30.,
+ time_sequence=[10., 39., 41.])
+
+ def test_call_until_true_with_params(self):
+ """func is called using given parameters
+
+ """
+ self._test_call_until_true(return_values=[False, True],
+ duration=30.,
+ time_sequence=[10., 30., 60.],
+ args=(1, 2),
+ kwargs=dict(foo='bar', bar='foo'))
+
+ def _test_call_until_true(self, return_values, duration, time_sequence,
+ args=None, kwargs=None):
+ """Test call_until_true function
+
+ :param return_values: list of boolean values to be returned
+ each time the given function is called. If any of these values
+ is not consumed by calling the function, the test fails.
+ The list must contain a sequence of False items terminated
+ by a single True or False.
+ :param duration: parameter passed to the call_until_true function
+ (a floating point value).
+ :param time_sequence: sequence of time values returned by the
+ mocked time.time function, used to trigger the timeout handling
+ of call_until_true. The sequence must contain exactly the number
+ of values expected to be consumed by the time.time calls made
+ by call_until_true.
+ :param args: sequence of positional arguments to be passed
+ to the call_until_true function.
+ :param kwargs: dict of named arguments to be passed
+ to the call_until_true function.
+ """
+
+ # all values except the last are False
+ self.assertEqual([False] * len(return_values[:-1]), return_values[:-1])
+ # last value can be True or False
+ self.assertIn(return_values[-1], [True, False])
+
+ # GIVEN
+ func = mock.Mock(side_effect=return_values)
+ sleep = 10. # this value has no effect as time.sleep is being mocked
+ sleep_func = self.patch('time.sleep')
+ time_func = self._patch_time(time_sequence)
+ args = args or tuple()
+ kwargs = kwargs or dict()
+
+ # WHEN
+ result = test_utils.call_until_true(func, duration, sleep,
+ *args, **kwargs)
+ # THEN
+
+ # It must return the last returned value
+ self.assertIs(return_values[-1], result)
+
+ self._test_func_calls(func, return_values, *args, **kwargs)
+ self._test_sleep_calls(sleep_func, return_values, sleep)
+        # The number of times time.time is called is not itself a
+        # requirement of call_until_true. What matters is that
+        # call_until_true used the mocked function (which makes the test
+        # deterministic) and that the test provided exactly the sequence
+        # of values required to reproduce the behavior under test.
+ self._assert_called_n_times(time_func, len(time_sequence))
+
+ def _patch_time(self, time_sequence):
+ # Iterator over time sequence
+ time_iterator = iter(time_sequence)
+ # Preserve original time.time() behavior for other threads
+ original_time = time.time
+ thread_id = thread.get_ident()
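+        # NOTE: patching time.time for every thread would feed the canned
+        # sequence to unrelated threads too (e.g. test runner timers), so
+        # mocked_time below serves the sequence only to this test thread.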
+
+ def mocked_time():
+ if thread.get_ident() == thread_id:
+ # Test thread => return time sequence values
+ return next(time_iterator)
+ else:
+ # Other threads => call original time function
+ return original_time()
+
+ return self.patch('time.time', side_effect=mocked_time)
+
+ def _test_func_calls(self, func, return_values, *args, **kwargs):
+ self._assert_called_n_times(func, len(return_values), *args, **kwargs)
+
+ def _test_sleep_calls(self, sleep_func, return_values, sleep):
+        # Count the leading consecutive False values: call_until_true is
+        # expected to sleep once after each call that returned False.
+ expected_count = 0
+ for value in return_values:
+ if value:
+ break
+ expected_count += 1
+ self._assert_called_n_times(sleep_func, expected_count, sleep)
+
+ def _assert_called_n_times(self, mock_func, expected_count, *args,
+ **kwargs):
+ calls = [mock.call(*args, **kwargs)] * expected_count
+ self.assertEqual(expected_count, mock_func.call_count)
+ mock_func.assert_has_calls(calls)
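+
+    # For example, _assert_called_n_times(sleep_func, 2, 10.) asserts
+    # that time.sleep was called exactly twice, both times as
+    # time.sleep(10.).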
diff --git a/tempest/tests/lib/services/volume/v3/test_services_client.py b/tempest/tests/lib/services/volume/v3/test_services_client.py
new file mode 100644
index 0000000..f65228f
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_services_client.py
@@ -0,0 +1,214 @@
+# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.services.volume.v3 import services_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServicesClient(base.BaseServiceTest):
+
+ FAKE_SERVICE_LIST = {
+ "services": [
+ {
+ "status": "enabled",
+ "binary": "cinder-backup",
+ "zone": "nova",
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:17.000000",
+ "host": "fake-host",
+ "disabled_reason": None
+ },
+ {
+ "status": "enabled",
+ "binary": "cinder-scheduler",
+ "zone": "nova",
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:24.000000",
+ "host": "fake-host",
+ "disabled_reason": None
+ },
+ {
+ "status": "enabled",
+ "binary": "cinder-volume",
+ "zone": "nova",
+ "frozen": False,
+ "state": "up",
+ "updated_at": "2017-07-20T07:20:20.000000",
+ "host": "fake-host@lvm",
+ "replication_status": "disabled",
+ "active_backend_id": None,
+ "disabled_reason": None
+ }
+ ]
+ }
+
+ FAKE_SERVICE_REQUEST = {
+ "host": "fake-host",
+ "binary": "cinder-volume"
+ }
+
+ FAKE_SERVICE_RESPONSE = {
+ "disabled": False,
+ "status": "enabled",
+ "host": "fake-host@lvm",
+ "service": "",
+ "binary": "cinder-volume",
+ "disabled_reason": None
+ }
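+
+    # NOTE: the fixtures above are canned payloads modeled on cinder's
+    # os-services API; the mocked RestClient returns them verbatim, so
+    # they need to be representative rather than complete.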
+
+ def setUp(self):
+ super(TestServicesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = services_client.ServicesClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_services(self, bytes_body=False,
+ mock_args='os-services', **params):
+ self.check_service_client_function(
+ self.client.list_services,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SERVICE_LIST,
+ to_utf=bytes_body,
+ mock_args=[mock_args],
+ **params)
+
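+    # NOTE: check_service_client_function (from
+    # tempest.tests.lib.services.base) patches the named RestClient
+    # method to return the canned body, invokes the client call with the
+    # given keyword arguments, and verifies the parsed response (and,
+    # when provided, mock_args) against the expected values.
+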
+ def _test_enable_service(self, bytes_body=False):
+ resp_body = self.FAKE_SERVICE_RESPONSE
+ kwargs = self.FAKE_SERVICE_REQUEST
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and
+        # actual payloads are guaranteed to be identical for the
+        # mock_args assertion.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
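+            # e.g. json.dumps({'host': 'h', 'binary': 'b'}) may emit the
+            # keys in either order depending on the interpreter, while
+            # sort_keys=True always yields '{"binary": "b", "host": "h"}'.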
+
+ self.check_service_client_function(
+ self.client.enable_service,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/enable', payload],
+ **kwargs)
+
+ def _test_disable_service(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+ resp_body.pop('disabled_reason')
+ resp_body['disabled'] = True
+ resp_body['status'] = 'disabled'
+ kwargs = self.FAKE_SERVICE_REQUEST
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and
+        # actual payloads are guaranteed to be identical for the
+        # mock_args assertion.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.disable_service,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/disable', payload],
+ **kwargs)
+
+ def _test_disable_log_reason(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
+ resp_body['disabled_reason'] = "disabled for test"
+ resp_body['disabled'] = True
+ resp_body['status'] = 'disabled'
+ kwargs = copy.deepcopy(self.FAKE_SERVICE_REQUEST)
+ kwargs.update({"disabled_reason": "disabled for test"})
+ payload = json.dumps(kwargs, sort_keys=True)
+ json_dumps = json.dumps
+
+        # NOTE: Use sort_keys for json.dumps so that the expected and
+        # actual payloads are guaranteed to be identical for the
+        # mock_args assertion.
+ with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
+ mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
+
+ self.check_service_client_function(
+ self.client.disable_log_reason,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ to_utf=bytes_body,
+ mock_args=['os-services/disable-log-reason', payload],
+ **kwargs)
+
+ def _test_freeze_host(self, bytes_body=False):
+ kwargs = {'host': 'host1@lvm'}
+ self.check_service_client_function(
+ self.client.freeze_host,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ **kwargs)
+
+ def _test_thaw_host(self, bytes_body=False):
+ kwargs = {'host': 'host1@lvm'}
+ self.check_service_client_function(
+ self.client.thaw_host,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ **kwargs)
+
+ def test_list_services_with_str_body(self):
+ self._test_list_services()
+
+ def test_list_services_with_bytes_body(self):
+ self._test_list_services(bytes_body=True)
+
+ def test_list_services_with_params(self):
+ mock_args = 'os-services?host=fake-host'
+ self._test_list_services(mock_args=mock_args, host='fake-host')
+
+ def test_enable_service_with_str_body(self):
+ self._test_enable_service()
+
+ def test_enable_service_with_bytes_body(self):
+ self._test_enable_service(bytes_body=True)
+
+ def test_disable_service_with_str_body(self):
+ self._test_disable_service()
+
+ def test_disable_service_with_bytes_body(self):
+ self._test_disable_service(bytes_body=True)
+
+ def test_disable_log_reason_with_str_body(self):
+ self._test_disable_log_reason()
+
+ def test_disable_log_reason_with_bytes_body(self):
+ self._test_disable_log_reason(bytes_body=True)
+
+ def test_freeze_host_with_str_body(self):
+ self._test_freeze_host()
+
+ def test_freeze_host_with_bytes_body(self):
+ self._test_freeze_host(bytes_body=True)
+
+ def test_thaw_host_with_str_body(self):
+ self._test_thaw_host()
+
+ def test_thaw_host_with_bytes_body(self):
+ self._test_thaw_host(bytes_body=True)