Merge "Add response schema validation for volume types"
diff --git a/.gitignore b/.gitignore
index 06a2281..9767e52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,9 @@
+# Don't add patterns to exclude files created by preferred personal tools
+# (editors, IDEs, your operating system itself even). These should instead be
+# maintained outside the repository, for example in a ~/.gitignore file added
+# with:
+#
+# git config --global core.excludesfile '~/.gitignore'
AUTHORS
ChangeLog
*.pyc
diff --git a/.zuul.yaml b/.zuul.yaml
index 4ca14ad..70f582e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -80,6 +80,7 @@
Integration test of IPv6-only deployments. This job runs
smoke and IPv6 related tests only. The basic idea is to test
whether OpenStack services listen on IPv6 addresses or not.
+ timeout: 10800
vars:
tox_envlist: ipv6-only
@@ -100,6 +101,14 @@
devstack_localrc:
ENABLE_FILE_INJECTION: true
ENABLE_VOLUME_MULTIATTACH: true
+ USE_PYTHON3: False
+ devstack_services:
+ # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+ # since tempest-full is in the integrated-gate project template but
+ # the backup tests do not really involve other services so they should
+ # be run in some more cinder-specific job, especially because the
+ # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+ c-bak: false
- job:
name: tempest-full-oslo-master
@@ -131,6 +140,9 @@
- opendev.org/openstack/oslo.utils
- opendev.org/openstack/oslo.versionedobjects
- opendev.org/openstack/oslo.vmware
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-parallel
@@ -139,11 +151,13 @@
branches:
- master
description: |
- Base integration test with Neutron networking and py27.
+ Base integration test with Neutron networking.
It includes all scenarios as it was in the past.
This job runs all scenario tests in parallel!
vars:
tox_envlist: full-parallel
+ devstack_localrc:
+ USE_PYTHON3: True
- job:
name: tempest-full-py3
@@ -169,6 +183,12 @@
s-object: false
s-proxy: false
# without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
c-bak: false
- job:
@@ -332,6 +352,13 @@
- stable/pike
- stable/queens
- stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
- job:
name: tempest-multinode-full-py3
@@ -339,14 +366,18 @@
vars:
devstack_localrc:
USE_PYTHON3: true
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
- job:
- name: tempest-full-py3-opensuse150
+ name: tempest-full-py3-opensuse15
parent: tempest-full-py3
- nodeset: devstack-single-node-opensuse-150
+ nodeset: devstack-single-node-opensuse-15
description: |
Base integration test with Neutron networking and py36 running
- on openSUSE Leap 15.0
+ on openSUSE Leap 15.x
voting: false
- job:
@@ -403,11 +434,15 @@
s-proxy: false
# without Swift, c-bak cannot run (in the Gate at least)
c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
- job:
- name: tempest-full-stein
- parent: tempest-full
- override-checkout: stable/stein
+ name: tempest-full-train-py3
+ parent: tempest-full-py3
+ override-checkout: stable/train
- job:
name: tempest-full-stein-py3
@@ -415,30 +450,12 @@
override-checkout: stable/stein
- job:
- name: tempest-full-rocky
- parent: tempest-full
- nodeset: openstack-single-node-xenial
- override-checkout: stable/rocky
-
-- job:
name: tempest-full-rocky-py3
parent: tempest-full-py3
nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- job:
- name: tempest-full-queens
- parent: tempest-full
- nodeset: openstack-single-node-xenial
- override-checkout: stable/queens
-
-- job:
- name: tempest-full-queens-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-xenial
- override-checkout: stable/queens
-
-- job:
name: tempest-tox-plugin-sanity-check
parent: tox
description: |
@@ -446,86 +463,7 @@
nodeset: ubuntu-bionic
vars:
tox_envlist: plugin-sanity-check
- voting: false
timeout: 5000
- required-projects:
- - opendev.org/airship/tempest-plugin
- - opendev.org/x/almanach
- - opendev.org/openstack/aodh
- - opendev.org/openstack/barbican-tempest-plugin
- - opendev.org/openstack/blazar-tempest-plugin
- - opendev.org/openstack/ceilometer
- - opendev.org/openstack/cinder-tempest-plugin
- - opendev.org/openstack/cloudkitty-tempest-plugin
- - opendev.org/openstack/congress-tempest-plugin
- - opendev.org/openstack/cyborg-tempest-plugin
- - opendev.org/openstack/designate-tempest-plugin
- - opendev.org/openstack/ec2api-tempest-plugin
- - opendev.org/openstack/freezer
- - opendev.org/openstack/freezer-api
- - opendev.org/openstack/freezer-tempest-plugin
- - opendev.org/x/gabbi-tempest
- - opendev.org/x/gce-api
- - opendev.org/x/glare
- - opendev.org/openstack/heat-tempest-plugin
- - opendev.org/x/intel-nfv-ci-tests
- - opendev.org/openstack/ironic-tempest-plugin
- - opendev.org/openstack/ironic-inspector
- - opendev.org/openstack/keystone-tempest-plugin
- - opendev.org/x/kingbird
- - opendev.org/openstack/kuryr-tempest-plugin
- - opendev.org/openstack/magnum
- - opendev.org/openstack/magnum-tempest-plugin
- - opendev.org/openstack/manila
- - opendev.org/openstack/manila-tempest-plugin
- - opendev.org/openstack/mistral-tempest-plugin
- - opendev.org/x/mogan
- - opendev.org/openstack/monasca-api
- - opendev.org/openstack/monasca-log-api
- - opendev.org/openstack/monasca-tempest-plugin
- - opendev.org/openstack/murano-tempest-plugin
- - opendev.org/openstack/networking-bgpvpn
- - opendev.org/x/networking-cisco
- - opendev.org/x/networking-fortinet
- - opendev.org/openstack/networking-generic-switch
- - opendev.org/openstack/networking-l2gw-tempest-plugin
- - opendev.org/openstack/networking-midonet
- - opendev.org/openstack/networking-sfc
- - opendev.org/x/networking-spp
- - opendev.org/openstack/neutron
- - opendev.org/openstack/neutron-dynamic-routing
- - opendev.org/openstack/neutron-fwaas
- - opendev.org/openstack/neutron-lbaas
- - opendev.org/openstack/neutron-tempest-plugin
- - opendev.org/openstack/neutron-vpnaas
- - opendev.org/x/nova-lxd
- - opendev.org/x/novajoin-tempest-plugin
- - opendev.org/openstack/octavia-tempest-plugin
- - opendev.org/openstack/oswin-tempest-plugin
- - opendev.org/openstack/panko
- - opendev.org/openstack/patrole
- - opendev.org/openstack/python-watcherclient
- - opendev.org/openstack/qinling
- - opendev.org/openstack/requirements
- - opendev.org/openstack/sahara-tests
- - opendev.org/openstack/senlin
- - opendev.org/openstack/senlin-tempest-plugin
- - opendev.org/openstack/solum-tempest-plugin
- - opendev.org/x/tap-as-a-service
- - opendev.org/x/tap-as-a-service-tempest-plugin
- - opendev.org/openstack/telemetry-tempest-plugin
- - opendev.org/openstack/tempest-horizon
- - opendev.org/x/tobiko
- - opendev.org/x/trio2o
- - opendev.org/openstack/tripleo-common-tempest-plugin
- - opendev.org/openstack/trove-tempest-plugin
- - opendev.org/x/valet
- - opendev.org/openstack/vitrage-tempest-plugin
- - opendev.org/x/vmware-nsx-tempest-plugin
- - opendev.org/openstack/watcher-tempest-plugin
- - opendev.org/x/whitebox-tempest-plugin
- - opendev.org/openstack/zaqar-tempest-plugin
- - opendev.org/openstack/zun-tempest-plugin
- job:
name: tempest-cinder-v2-api
@@ -571,12 +509,13 @@
name: tempest-pg-full
parent: tempest-full
description: |
- Base integration test with Neutron networking and py27 and PostgreSQL.
+ Base integration test with Neutron networking and PostgreSQL.
Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
vars:
devstack_localrc:
ENABLE_FILE_INJECTION: true
DATABASE_TYPE: postgresql
+ USE_PYTHON3: True
- project-template:
name: integrated-gate-networking
@@ -660,12 +599,9 @@
- project:
templates:
- check-requirements
- - integrated-gate
- integrated-gate-py3
- openstack-cover-jobs
- - openstack-python-jobs
- - openstack-python35-jobs
- - openstack-python3-train-jobs
+ - openstack-python3-ussuri-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
@@ -692,25 +628,21 @@
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- ^tools/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
- tempest-full-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3-ipv6:
voting: false
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-stein:
+ - tempest-full-train-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-stein-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-rocky:
- irrelevant-files: *tempest-irrelevant-files
- tempest-full-rocky-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-queens:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-queens-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-multinode-full:
- irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-tox-plugin-sanity-check:
@@ -722,25 +654,25 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
# tools/ is not here since this relies on a script in tools/.
- tempest-ipv6-only:
irrelevant-files: *tempest-irrelevant-files-2
- - tempest-slow:
- irrelevant-files: *tempest-irrelevant-files
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - devstack-plugin-ceph-tempest-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade:
- irrelevant-files: *tempest-irrelevant-files
- grenade-py3:
irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario001-tempest-centos-7:
voting: false
irrelevant-files: *tempest-irrelevant-files
@@ -755,8 +687,6 @@
irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full:
- irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-test-account-py3:
@@ -765,16 +695,15 @@
- tempest-full-test-account-no-admin-py3:
voting: false
irrelevant-files: *tempest-irrelevant-files
- - openstack-tox-bashate
+ - openstack-tox-bashate:
+ irrelevant-files: *tempest-irrelevant-files-2
gate:
jobs:
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade:
+ - tempest-full-py3:
irrelevant-files: *tempest-irrelevant-files
- grenade-py3:
irrelevant-files: *tempest-irrelevant-files
@@ -794,20 +723,15 @@
irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-lvm-multibackend:
irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- tempest-pg-full:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse150:
+ - tempest-full-py3-opensuse15:
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
- - tempest-full-stein
+ - tempest-full-train-py3
- tempest-full-stein-py3
- - tempest-full-rocky
- tempest-full-rocky-py3
- - tempest-full-queens
- - tempest-full-queens-py3
periodic:
jobs:
- tempest-all
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..a89ad94
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+If you would like to contribute to the development of OpenStack, you must
+follow the steps on this page:
+
+ https://docs.openstack.org/infra/manual/developers.html
+
+If you already have a good understanding of how the system works and your
+OpenStack accounts are set up, you can skip to the development workflow
+section of this documentation to learn how changes to OpenStack should be
+submitted for review via the Gerrit tool:
+
+ https://docs.openstack.org/infra/manual/developers.html#development-workflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+ https://bugs.launchpad.net/tempest
diff --git a/README.rst b/README.rst
index 841fae6..3cde2bf 100644
--- a/README.rst
+++ b/README.rst
@@ -10,274 +10,15 @@
Tempest - The OpenStack Integration Test Suite
==============================================
-The documentation for Tempest is officially hosted at:
-https://docs.openstack.org/tempest/latest/
-
This is a set of integration tests to be run against a live OpenStack
cluster. Tempest has batteries of tests for OpenStack API validation,
scenarios, and other specific tests useful in validating an OpenStack
deployment.
-Design Principles
------------------
-Tempest Design Principles that we strive to live by.
+ * Documentation: https://docs.openstack.org/tempest/latest/
+ * Features: https://specs.openstack.org/openstack/qa-specs/#tempest
+ * Bugs: https://bugs.launchpad.net/tempest/
+ * Release Notes: https://docs.openstack.org/releasenotes/tempest
-- Tempest should be able to run against any OpenStack cloud, be it a
- one node DevStack install, a 20 node LXC cloud, or a 1000 node KVM
- cloud.
-- Tempest should be explicit in testing features. It is easy to auto
- discover features of a cloud incorrectly, and give people an
- incorrect assessment of their cloud. Explicit is always better.
-- Tempest uses OpenStack public interfaces. Tests in Tempest should
- only touch public OpenStack APIs.
-- Tempest should not touch private or implementation specific
- interfaces. This means not directly going to the database, not
- directly hitting the hypervisors, not testing extensions not
- included in the OpenStack base. If there are some features of
- OpenStack that are not verifiable through standard interfaces, this
- should be considered a possible enhancement.
-- Tempest strives for complete coverage of the OpenStack API and
- common scenarios that demonstrate a working cloud.
-- Tempest drives load in an OpenStack cloud. By including a broad
- array of API and scenario tests Tempest can be reused in whole or in
- parts as load generation for an OpenStack cloud.
-- Tempest should attempt to clean up after itself, whenever possible
- we should tear down resources when done.
-- Tempest should be self-testing.
-
-Quickstart
-----------
-
-To run Tempest, you first need to create a configuration file that will tell
-Tempest where to find the various OpenStack services and other testing behavior
-switches. Where the configuration file lives and how you interact with it
-depends on how you'll be running Tempest. There are 2 methods of using Tempest.
-The first, which is a newer and recommended workflow treats Tempest as a system
-installed program. The second older method is to run Tempest assuming your
-working dir is the actually Tempest source repo, and there are a number of
-assumptions related to that. For this section we'll only cover the newer method
-as it is simpler, and quicker to work with.
-
-#. You first need to install Tempest. This is done with pip after you check out
- the Tempest repo::
-
- $ git clone https://opendev.org/openstack/tempest
- $ pip install tempest/
-
- This can be done within a venv, but the assumption for this guide is that
- the Tempest CLI entry point will be in your shell's PATH.
-
-#. Installing Tempest may create a ``/etc/tempest dir``, however if one isn't
- created you can create one or use ``~/.tempest/etc`` or ``~/.config/tempest`` in
- place of ``/etc/tempest``. If none of these dirs are created Tempest will create
- ``~/.tempest/etc`` when it's needed. The contents of this dir will always
- automatically be copied to all ``etc/`` dirs in local workspaces as an initial
- setup step. So if there is any common configuration you'd like to be shared
- between local Tempest workspaces it's recommended that you pre-populate it
- before running ``tempest init``.
-
-#. Setup a local Tempest workspace. This is done by using the tempest init
- command::
-
- $ tempest init cloud-01
-
- which also works the same as::
-
- $ mkdir cloud-01 && cd cloud-01 && tempest init
-
- This will create a new directory for running a single Tempest configuration.
- If you'd like to run Tempest against multiple OpenStack deployments the idea
- is that you'll create a new working directory for each to maintain separate
- configuration files and local artifact storage for each.
-
-#. Then ``cd`` into the newly created working dir and also modify the local
- config files located in the ``etc/`` subdir created by the ``tempest init``
- command. Tempest is expecting a ``tempest.conf`` file in etc/ so if only a
- sample exists you must rename or copy it to tempest.conf before making
- any changes to it otherwise Tempest will not know how to load it. For
- details on configuring Tempest refer to the
- `Tempest Configuration <https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
-
-#. Once the configuration is done you're now ready to run Tempest. This can
- be done using the `Tempest Run <https://docs.openstack.org/tempest/latest/run.html#tempest-run>`_
- command. This can be done by either
- running::
-
- $ tempest run
-
- from the Tempest workspace directory. Or you can use the ``--workspace``
- argument to run in the workspace you created regardless of your current
- working directory. For example::
-
- $ tempest run --workspace cloud-01
-
- There is also the option to use `stestr`_ directly. For example, from
- the workspace dir run::
-
- $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
-
- will run the same set of tests as the default gate jobs. Or you can
- use `unittest`_ compatible test runners such as `testr`_, `pytest`_ etc.
-
- Tox also contains several existing job configurations. For example::
-
- $ tox -e full
-
- which will run the same set of tests as the OpenStack gate. (it's exactly how
- the gate invokes Tempest) Or::
-
- $ tox -e smoke
-
- to run the tests tagged as smoke.
-
-.. _unittest: https://docs.python.org/3/library/unittest.html
-.. _testr: https://testrepository.readthedocs.org/en/latest/MANUAL.html
-.. _stestr: https://stestr.readthedocs.org/en/latest/MANUAL.html
-.. _pytest: https://docs.pytest.org/en/latest/
-
-Library
--------
-Tempest exposes a library interface. This interface is a stable interface and
-should be backwards compatible (including backwards compatibility with the
-old tempest-lib package, with the exception of the import). If you plan to
-directly consume Tempest in your project you should only import code from the
-Tempest library interface, other pieces of Tempest do not have the same
-stable interface and there are no guarantees on the Python API unless otherwise
-stated.
-
-For more details refer to the `library documentation
-<https://docs.openstack.org/tempest/latest/library.html#library>`_
-
-Release Versioning
-------------------
-`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
-shows what changes have been released on each version.
-
-Tempest's released versions are broken into 2 sets of information. Depending on
-how you intend to consume Tempest you might need
-
-The version is a set of 3 numbers:
-
-X.Y.Z
-
-While this is almost `semver`_ like, the way versioning is handled is slightly
-different:
-
-X is used to represent the supported OpenStack releases for Tempest tests
-in-tree, and to signify major feature changes to Tempest. It's a monotonically
-increasing integer where each version either indicates a new supported OpenStack
-release, the drop of support for an OpenStack release (which will coincide with
-the upstream stable branch going EOL), or a major feature lands (or is removed)
-from Tempest.
-
-Y.Z is used to represent library interface changes. This is treated the same
-way as minor and patch versions from `semver`_ but only for the library
-interface. When Y is incremented we've added functionality to the library
-interface and when Z is incremented it's a bug fix release for the library.
-Also note that both Y and Z are reset to 0 at each increment of X.
-
-.. _semver: https://semver.org/
-
-Configuration
--------------
-
-Detailed configuration of Tempest is beyond the scope of this
-document, see `Tempest Configuration Documentation
-<https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
-for more details on configuring Tempest.
-The ``etc/tempest.conf.sample`` attempts to be a self-documenting
-version of the configuration.
-
-You can generate a new sample tempest.conf file, run the following
-command from the top level of the Tempest directory::
-
- $ tox -e genconfig
-
-The most important pieces that are needed are the user ids, OpenStack
-endpoints, and basic flavors and images needed to run tests.
-
-Unit Tests
-----------
-
-Tempest also has a set of unit tests which test the Tempest code itself. These
-tests can be run by specifying the test discovery path::
-
- $ stestr --test-path ./tempest/tests run
-
-By setting ``--test-path`` option to ./tempest/tests it specifies that test discover
-should only be run on the unit test directory. The default value of ``test_path``
-is ``test_path=./tempest/test_discover`` which will only run test discover on the
-Tempest suite.
-
-Alternatively, there are the py27 and py36 tox jobs which will run the unit
-tests with the corresponding version of python.
-
-One common activity is to just run a single test, you can do this with tox
-simply by specifying to just run py27 or py36 tests against a single test::
-
- $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
-
-Or all tests in the test_microversions.py file::
-
- $ tox -e py36 -- -n tempest.tests.test_microversions
-
-You may also use regular expressions to run any matching tests::
-
- $ tox -e py36 -- test_microversions
-
-Additionally, when running a single test, or test-file, the ``-n/--no-discover``
-argument is no longer required, however it may perform faster if included.
-
-For more information on these options and details about stestr, please see the
-`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
-
-Python 3.x
-----------
-
-Starting during the Pike cycle Tempest has a gating CI job that runs Tempest
-with Python 3. Any Tempest release after 15.0.0 should fully support running
-under Python 3 as well as Python 2.7.
-
-Legacy run method
------------------
-
-The legacy method of running Tempest is to just treat the Tempest source code
-as a python unittest repository and run directly from the source repo. When
-running in this way you still start with a Tempest config file and the steps
-are basically the same except that it expects you know where the Tempest code
-lives on your system and requires a bit more manual interaction to get Tempest
-running. For example, when running Tempest this way things like a lock file
-directory do not get generated automatically and the burden is on the user to
-create and configure that.
-
-To start you need to create a configuration file. The easiest way to create a
-configuration file is to generate a sample in the ``etc/`` directory ::
-
- $ cd $TEMPEST_ROOT_DIR
- $ oslo-config-generator --config-file \
- tempest/cmd/config-generator.tempest.conf \
- --output-file etc/tempest.conf
-
-After that, open up the ``etc/tempest.conf`` file and edit the
-configuration variables to match valid data in your environment.
-This includes your Keystone endpoint, a valid user and credentials,
-and reference data to be used in testing.
-
-.. note::
-
- If you have a running DevStack environment, Tempest will be
- automatically configured and placed in ``/opt/stack/tempest``. It
- will have a configuration file already set up to work with your
- DevStack installation.
-
-Tempest is not tied to any single test runner, but `testr`_ is the most commonly
-used tool. Also, the nosetests test runner is **not** recommended to run Tempest.
-
-After setting up your configuration file, you can execute the set of Tempest
-tests by using ``testr`` ::
-
- $ testr run --parallel
-
-To run one single test serially ::
-
- $ testr run tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
+Get in touch via `email <mailto:openstack-discuss@lists.openstack.org>`_. Use
+[tempest] in your subject.
diff --git a/REVIEWING.rst b/REVIEWING.rst
index 498ce66..e07e358 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -183,6 +183,9 @@
* the project's PTL +1's the change
* the patch does not affect any other project's testing gates
* the patch does not cause any negative side effects
+ * the patch fixes or removes a faulty plugin (which otherwise causes the
+ voting ``tempest-tox-plugin-sanity-check`` job to fail) and unblocks the
+ tempest gate
Note that such a policy should be used judiciously, as we should strive to
have two +2's on each patch set, prior to approval.
diff --git a/doc/requirements.txt b/doc/requirements.txt
index d959d44..9f38ada 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,6 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-openstackdocstheme>=1.18.1 # Apache-2.0
+openstackdocstheme>=1.20.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
+sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c2ea628..7ce431e 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -52,6 +52,7 @@
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
+ 'sphinxcontrib.rsvgconverter',
'openstackdocstheme',
'oslo_config.sphinxconfiggen',
]
@@ -66,9 +67,6 @@
bug_project = 'tempest'
bug_tag = 'doc'
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -82,7 +80,6 @@
master_doc = 'index'
# General information about the project.
-project = u'Tempest'
copyright = u'2013, OpenStack QA Team'
# The language for content autogenerated by Sphinx. Refer to documentation
@@ -196,3 +193,16 @@
# A list of warning types to suppress arbitrary warning messages.
suppress_warnings = ['image.nonlocal_uri']
+
+# -- Options for LaTeX output -------------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+ ('index', 'doc-tempest.tex', u'Tempest Testing Project',
+ u'OpenStack Foundation', 'manual'),
+]
+
+# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
+latex_use_xindy = False
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fecf98a..ab994d1 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -48,6 +48,14 @@
workspace
run
+Supported OpenStack Releases and Python Versions
+------------------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ supported_version
+
Developers Guide
================
@@ -88,7 +96,12 @@
stable_branch_support_policy
-Indices and tables
-==================
+Search
+======
-* :ref:`search`
+.. only:: html
+
+ * :ref:`Tempest document search <search>`: Search the contents of this document.
+
+* `OpenStack wide search <https://docs.openstack.org>`_: Search the wider
+ set of OpenStack documentation, including forums.
\ No newline at end of file
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
deleted file mode 120000
index c768ff7..0000000
--- a/doc/source/overview.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../README.rst
\ No newline at end of file
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
new file mode 100644
index 0000000..e51b90b
--- /dev/null
+++ b/doc/source/overview.rst
@@ -0,0 +1,281 @@
+Tempest - The OpenStack Integration Test Suite
+==============================================
+
+The documentation for Tempest is officially hosted at:
+https://docs.openstack.org/tempest/latest/
+
+This is a set of integration tests to be run against a live OpenStack
+cluster. Tempest has batteries of tests for OpenStack API validation,
+scenarios, and other specific tests useful in validating an OpenStack
+deployment.
+
+Team and repository tags
+------------------------
+
+.. image:: https://governance.openstack.org/tc/badges/tempest.svg
+ :target: https://governance.openstack.org/tc/reference/tags/index.html
+
+.. Change things from this point on
+
+Design Principles
+-----------------
+Tempest Design Principles that we strive to live by.
+
+- Tempest should be able to run against any OpenStack cloud, be it a
+ one node DevStack install, a 20 node LXC cloud, or a 1000 node KVM
+ cloud.
+- Tempest should be explicit in testing features. It is easy to auto
+ discover features of a cloud incorrectly, and give people an
+ incorrect assessment of their cloud. Explicit is always better.
+- Tempest uses OpenStack public interfaces. Tests in Tempest should
+ only touch public OpenStack APIs.
+- Tempest should not touch private or implementation specific
+ interfaces. This means not directly going to the database, not
+ directly hitting the hypervisors, not testing extensions not
+ included in the OpenStack base. If there are some features of
+ OpenStack that are not verifiable through standard interfaces, this
+ should be considered a possible enhancement.
+- Tempest strives for complete coverage of the OpenStack API and
+ common scenarios that demonstrate a working cloud.
+- Tempest drives load in an OpenStack cloud. By including a broad
+ array of API and scenario tests Tempest can be reused in whole or in
+ parts as load generation for an OpenStack cloud.
+- Tempest should attempt to clean up after itself, whenever possible
+ we should tear down resources when done.
+- Tempest should be self-testing.
+
+Quickstart
+----------
+
+To run Tempest, you first need to create a configuration file that will tell
+Tempest where to find the various OpenStack services and other testing behavior
+switches. Where the configuration file lives and how you interact with it
+depends on how you'll be running Tempest. There are 2 methods of using Tempest.
+The first, which is the newer and recommended workflow, treats Tempest as a
+system-installed program. The second, older method is to run Tempest assuming
+your working dir is the actual Tempest source repo, and there are a number of
+assumptions related to that. For this section we'll only cover the newer method
+as it is simpler and quicker to work with.
+
+#. You first need to install Tempest. This is done with pip after you check out
+ the Tempest repo::
+
+ $ git clone https://opendev.org/openstack/tempest
+ $ pip install tempest/
+
+ This can be done within a venv, but the assumption for this guide is that
+ the Tempest CLI entry point will be in your shell's PATH.
+
+#. Installing Tempest may create a ``/etc/tempest`` dir, however if one isn't
+ created you can create one or use ``~/.tempest/etc`` or ``~/.config/tempest`` in
+ place of ``/etc/tempest``. If none of these dirs are created Tempest will create
+ ``~/.tempest/etc`` when it's needed. The contents of this dir will always
+ automatically be copied to all ``etc/`` dirs in local workspaces as an initial
+ setup step. So if there is any common configuration you'd like to be shared
+ between local Tempest workspaces it's recommended that you pre-populate it
+ before running ``tempest init``.
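+
+ For example, to pre-populate a shared configuration before creating any
+ workspaces (the config file name below is illustrative)::
+
+     $ mkdir -p ~/.tempest/etc
+     $ cp my-shared-tempest.conf ~/.tempest/etc/tempest.conf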
+
+#. Setup a local Tempest workspace. This is done by using the tempest init
+ command::
+
+ $ tempest init cloud-01
+
+ which also works the same as::
+
+ $ mkdir cloud-01 && cd cloud-01 && tempest init
+
+ This will create a new directory for running a single Tempest configuration.
+ If you'd like to run Tempest against multiple OpenStack deployments the idea
+ is that you'll create a new working directory for each to maintain separate
+ configuration files and local artifact storage for each.
+
+#. Then ``cd`` into the newly created working dir and also modify the local
+ config files located in the ``etc/`` subdir created by the ``tempest init``
+ command. Tempest is expecting a ``tempest.conf`` file in ``etc/``, so if only a
+ sample exists you must rename or copy it to ``tempest.conf`` before making
+ any changes to it, otherwise Tempest will not know how to load it. For
+ details on configuring Tempest refer to the
+ `Tempest Configuration <https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
+
+#. Once the configuration is done you're now ready to run Tempest. This can
+ be done using the `Tempest Run <https://docs.openstack.org/tempest/latest/run.html#tempest-run>`_
+ command. This can be done by either
+ running::
+
+ $ tempest run
+
+ from the Tempest workspace directory. Or you can use the ``--workspace``
+ argument to run in the workspace you created regardless of your current
+ working directory. For example::
+
+ $ tempest run --workspace cloud-01
+
+ There is also the option to use `stestr`_ directly. For example, from
+ the workspace dir run::
+
+ $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
+
+ will run the same set of tests as the default gate jobs. Or you can
+ use `unittest`_ compatible test runners such as `stestr`_, `pytest`_ etc.
+
+ Tox also contains several existing job configurations. For example::
+
+ $ tox -e full
+
+ which will run the same set of tests as the OpenStack gate (it's exactly how
+ the gate invokes Tempest). Or::
+
+ $ tox -e smoke
+
+ to run the tests tagged as smoke.
+
+.. _unittest: https://docs.python.org/3/library/unittest.html
+.. _stestr: https://stestr.readthedocs.org/en/latest/MANUAL.html
+.. _pytest: https://docs.pytest.org/en/latest/
+
+Library
+-------
+Tempest exposes a library interface. This interface is a stable interface and
+should be backwards compatible (including backwards compatibility with the
+old tempest-lib package, with the exception of the import). If you plan to
+directly consume Tempest in your project you should only import code from the
+Tempest library interface, other pieces of Tempest do not have the same
+stable interface and there are no guarantees on the Python API unless otherwise
+stated.
+
+For more details refer to the `library documentation
+<https://docs.openstack.org/tempest/latest/library.html#library>`_
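+
+A minimal sketch of consuming only the stable library interface (the test
+class name and UUID below are illustrative)::
+
+    import testtools
+
+    from tempest.lib import decorators
+    from tempest.lib import exceptions as lib_exc
+
+    class ExampleTest(testtools.TestCase):
+
+        @decorators.idempotent_id('12345678-1234-5678-1234-567812345678')
+        def test_not_found_exception(self):
+            # tempest.lib decorators and exceptions are part of the stable
+            # interface and are safe to import from external projects.
+            self.assertRaises(lib_exc.NotFound, self._lookup_missing)
+
+        def _lookup_missing(self):
+            raise lib_exc.NotFound()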
+
+Release Versioning
+------------------
+`Tempest Release Notes <https://docs.openstack.org/releasenotes/tempest>`_
+shows what changes have been released on each version.
+
+Tempest's released versions are broken into 2 sets of information. Depending on
+how you intend to consume Tempest you might need one or both of them.
+
+The version is a set of 3 numbers:
+
+X.Y.Z
+
+While this is almost `semver`_ like, the way versioning is handled is slightly
+different:
+
+X is used to represent the supported OpenStack releases for Tempest tests
+in-tree, and to signify major feature changes to Tempest. It's a monotonically
+increasing integer where each version either indicates a new supported OpenStack
+release, the drop of support for an OpenStack release (which will coincide with
+the upstream stable branch going EOL), or a major feature lands (or is removed)
+from Tempest.
+
+Y.Z is used to represent library interface changes. This is treated the same
+way as minor and patch versions from `semver`_ but only for the library
+interface. When Y is incremented we've added functionality to the library
+interface and when Z is incremented it's a bug fix release for the library.
+Also note that both Y and Z are reset to 0 at each increment of X.
+
+.. _semver: https://semver.org/
+
+Configuration
+-------------
+
+Detailed configuration of Tempest is beyond the scope of this
+document, see `Tempest Configuration Documentation
+<https://docs.openstack.org/tempest/latest/configuration.html#tempest-configuration>`_
+for more details on configuring Tempest.
+The ``etc/tempest.conf.sample`` attempts to be a self-documenting
+version of the configuration.
+
+You can generate a new sample tempest.conf file, run the following
+command from the top level of the Tempest directory::
+
+ $ tox -e genconfig
+
+The most important pieces that are needed are the user ids, OpenStack
+endpoints, and basic flavors and images needed to run tests.
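+
+A minimal sketch of those pieces in ``etc/tempest.conf`` (all values below are
+illustrative and must be replaced with data from your own cloud)::
+
+    [auth]
+    admin_username = admin
+    admin_password = secret
+    admin_project_name = admin
+
+    [identity]
+    uri_v3 = http://203.0.113.10/identity/v3
+
+    [compute]
+    image_ref = 6e52409b-4bcf-4f27-b1a6-5f424eaf9c3b
+    flavor_ref = 42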
+
+Unit Tests
+----------
+
+Tempest also has a set of unit tests which test the Tempest code itself. These
+tests can be run by specifying the test discovery path::
+
+ $ stestr --test-path ./tempest/tests run
+
+Setting the ``--test-path`` option to ``./tempest/tests`` specifies that test
+discovery should only be run on the unit test directory. The default value of
+``test_path`` is ``test_path=./tempest/test_discover``, which will only run
+test discovery on the Tempest suite.
+
+Alternatively, there are the py27 and py36 tox jobs which will run the unit
+tests with the corresponding version of python.
+
+One common activity is to just run a single test; you can do this with tox
+simply by specifying to just run py27 or py36 tests against a single test::
+
+ $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+
+Or all tests in the test_microversions.py file::
+
+ $ tox -e py36 -- -n tempest.tests.test_microversions
+
+You may also use regular expressions to run any matching tests::
+
+ $ tox -e py36 -- test_microversions
+
+Additionally, when running a single test, or test-file, the ``-n/--no-discover``
+argument is no longer required, however it may perform faster if included.
+
+For more information on these options and details about stestr, please see the
+`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+
+Python 3.x
+----------
+
+Starting during the Pike cycle Tempest has a gating CI job that runs Tempest
+with Python 3. Any Tempest release after 15.0.0 should fully support running
+under Python 3 as well as Python 2.7.
+
+Legacy run method
+-----------------
+
+The legacy method of running Tempest is to just treat the Tempest source code
+as a python unittest repository and run directly from the source repo. When
+running in this way you still start with a Tempest config file and the steps
+are basically the same except that it expects you to know where the Tempest code
+lives on your system and requires a bit more manual interaction to get Tempest
+running. For example, when running Tempest this way things like a lock file
+directory do not get generated automatically and the burden is on the user to
+create and configure that.
+
+To start you need to create a configuration file. The easiest way to create a
+configuration file is to generate a sample in the ``etc/`` directory ::
+
+ $ cd $TEMPEST_ROOT_DIR
+ $ oslo-config-generator --config-file \
+ tempest/cmd/config-generator.tempest.conf \
+ --output-file etc/tempest.conf
+
+After that, open up the ``etc/tempest.conf`` file and edit the
+configuration variables to match valid data in your environment.
+This includes your Keystone endpoint, a valid user and credentials,
+and reference data to be used in testing.
+
+.. note::
+
+ If you have a running DevStack environment, Tempest will be
+ automatically configured and placed in ``/opt/stack/tempest``. It
+ will have a configuration file already set up to work with your
+ DevStack installation.
+
+Tempest is not tied to any single test runner, but `stestr`_ is the most commonly
+used tool. Also, the nosetests test runner is **not** recommended to run Tempest.
+
+After setting up your configuration file, you can execute the set of Tempest
+tests by using ``stestr``. By default, ``stestr`` runs tests in parallel ::
+
+ $ stestr run
+
+To run one single test serially ::
+
+ $ stestr run --serial tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
new file mode 100644
index 0000000..4f65fd4
--- /dev/null
+++ b/doc/source/supported_version.rst
@@ -0,0 +1,36 @@
+Supported OpenStack Releases and Python Versions
+================================================
+
+This document lists the officially supported OpenStack releases
+and python versions by Tempest.
+
+Compatible OpenStack Releases
+-----------------------------
+
+Tempest master supports the following OpenStack releases:
+
+* Train
+* Stein
+* Rocky
+
+For older OpenStack releases:
+
+For any OpenStack release older than those listed above, Tempest master might
+still work, but if Tempest master starts failing you can use the respective
+Tempest tag listed on the OpenStack releases page.
+
+For example: OpenStack Stein: Tempest 20.0.0
+
+* https://releases.openstack.org/stein/index.html#stein-tempest
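+
+A minimal sketch of checking out and installing that tag (the tag value is
+taken from the Stein example above)::
+
+    $ git clone https://opendev.org/openstack/tempest
+    $ cd tempest
+    $ git checkout 20.0.0
+    $ pip install .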
+
+How to use a Tempest tag on an Extended Maintenance stable branch:
+
+* https://review.opendev.org/#/c/705098/
+
+Supported Python Versions
+-------------------------
+
+Tempest master supports the following Python versions:
+
+* Python 3.6
+* Python 3.7
diff --git a/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml b/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml
new file mode 100644
index 0000000..e2fc5b3
--- /dev/null
+++ b/releasenotes/notes/Extend-cleanup-CLI-to-delete-regions-9f1dbda2c8de12e2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The tempest cleanup CLI is extended to support region deletion. Until now,
+ regions have been neglected by tempest cleanup. From now on, tempest
+ cleanup is able to delete leftover regions as well.
diff --git a/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml b/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml
new file mode 100644
index 0000000..e879c2c
--- /dev/null
+++ b/releasenotes/notes/add-consistency-group-exceptions-01cbb792cd710231.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed bug #1858417 by adding the consistency group exceptions
+ ``ConsistencyGroupException`` and ``ConsistencyGroupSnapshotException``,
+ which didn't exist before and whose absence caused failures in
+ cinder-tempest-plugin.
diff --git a/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml b/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml
new file mode 100644
index 0000000..a28e4e2
--- /dev/null
+++ b/releasenotes/notes/change-default-region-for-placement-to-empty-string-394f1132c28345bb.yaml
@@ -0,0 +1,13 @@
+---
+upgrade:
+ - |
+ The default value of the config option ``CONF.placement.region`` is updated
+ from ``RegionOne`` to an empty string.
+
+ Per Tempest's design, if the tempest conf does not set a region for a
+ service, the identity region should be used. For placement, the previous
+ default of "RegionOne" was treated as the placement region whenever the
+ region was missing from the tempest conf. To make the identity region the
+ effective default, the placement default is changed to an empty string,
+ which is what other services such as volume and image already use.
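+
+ For example, a deployment that still wants placement pinned to a specific
+ region, rather than falling back to the identity region, can set it
+ explicitly in tempest.conf (the region name below is illustrative)::
+
+     [placement]
+     region = RegionOne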
diff --git a/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
new file mode 100644
index 0000000..6bbb381
--- /dev/null
+++ b/releasenotes/notes/deprecate-compute-feature-enabled-block-migrate-cinder-iscsi-fcda802d774dfeec.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - |
+ The ``[compute-feature-enabled]/block_migrate_cinder_iscsi`` option is
+ deprecated ahead of removal in a future release. Once removed, the
+ ``[compute-feature-enabled]/block_migration_for_live_migration``
+ option will then be used to determine when to run block-migration-based
+ tests during live migration.
diff --git a/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
new file mode 100644
index 0000000..58b161f
--- /dev/null
+++ b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The config options ``CONF.compute.spice_console`` and ``CONF.compute.rdp_console``
+ are deprecated because the test cases using them have been removed.
+ They can be added back when the test cases are added again.
diff --git a/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml b/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml
new file mode 100644
index 0000000..d7e144d
--- /dev/null
+++ b/releasenotes/notes/deprecate-vnc-server-header-529f07d592aefb62.yaml
@@ -0,0 +1,12 @@
+---
+deprecations:
+ - |
+ The config option ``CONF.compute.vnc_server_header`` is deprecated because
+ it has become obsolete with the usage of different response header fields
+ to accomplish the same goal in accordance with RFC7231 Section 6.2.2.
+
+fixes:
+ - |
+ Adequately validates WebSocket upgrade in test_novnc and removes unneeded
+ configuration complexity. Closes bugs #1838777 and #1840788.
+
diff --git a/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
new file mode 100644
index 0000000..a0ac244
--- /dev/null
+++ b/releasenotes/notes/drop-py-2-7-730baf411876d5d8.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Python 2.7 support has been dropped. The last release of Tempest
+ to support Python 2.7 is Tempest 23.0.0. The minimum version of Python now
+ supported by Tempest is Python 3.6.
diff --git a/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
new file mode 100644
index 0000000..99ef31e
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-5-support-76ca78f1a650fcad.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+ Remove support for Python 3.5.
+ In the Ussuri cycle, Tempest and its plugin dependencies
+ require Python >= 3.6, which forces distros without
+ Python 3.6 to resort to workarounds to install it.
+ It is time to drop Python 3.5 from Tempest. The last
+ version of Tempest to support Python 3.5 is 23.0.0.
diff --git a/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
new file mode 100644
index 0000000..4842f63
--- /dev/null
+++ b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed bug #1847749. This provides a workaround for a skip exception being
+ raised instead of the CLI tests being skipped. If you are running Tempest
+ with stestr > 2.5.0 then use this fix.
+ Ref: https://github.com/testing-cabal/testtools/issues/272
diff --git a/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml b/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml
new file mode 100644
index 0000000..9534a72
--- /dev/null
+++ b/releasenotes/notes/fix-credential-logging-98089c897d801355.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ A new kwarg, ``log_req_body``, was added to the
+ ``tempest.lib.common.rest_client.RestClient`` method ``raw_request()``.
+ This kwarg takes in a string which will be used in place of the request
+ body, which is logged by default. The intent of this option is to be used
+ for security reasons to avoid logging sensitive information that is part
+ of request bodies.
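+
+ A minimal sketch of how a caller might use it (the ``client`` instance,
+ URL, and request body below are illustrative)::
+
+     resp, resp_body = client.raw_request(
+         'servers', 'POST',
+         headers={'Content-Type': 'application/json'},
+         body='{"adminPass": "secret"}',
+         log_req_body='<masked request body>')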
diff --git a/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
new file mode 100644
index 0000000..56f160a
--- /dev/null
+++ b/releasenotes/notes/intermediate-ussuri-release-8aebeca312a6718c.yaml
@@ -0,0 +1,14 @@
+---
+prelude: |
+ This is an intermediate release during the Ussuri development cycle to
+ mark the end of support for EM Queens in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Train
+ * Stein
+ * Rocky
+
+ Current development of Tempest is for OpenStack Ussuri development
+ cycle.
+
+ This is the last release of Tempest to officially support python2.7.
diff --git a/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
new file mode 100644
index 0000000..936bf1f
--- /dev/null
+++ b/releasenotes/notes/os_tenant_name-3ee175763bff455b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Remove the deprecated ``os-tenant-name`` (or ``OS_TENANT_NAME``) argument
+ in favour of the ``os-project-name`` argument.
diff --git a/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml b/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml
new file mode 100644
index 0000000..e7fc3a0
--- /dev/null
+++ b/releasenotes/notes/subunit_describe_calls-ad7df689b9d63e3f.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - |
+ Deprecated the standalone ``subunit-describe-calls`` command.
+
+features:
+ - |
+ Added the new ``tempest subunit-describe-calls`` subcommand.
diff --git a/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml b/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml
new file mode 100644
index 0000000..960b0b2
--- /dev/null
+++ b/releasenotes/notes/tempest-train-release-a2ed0743a5eadeb6.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+ This release is to tag the Tempest for OpenStack Train release.
+ This release marks the start of Train release support in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Train
+ * Stein
+ * Rocky
+ * Queens
+
+ Current development of Tempest is for OpenStack Ussuri development
+ cycle. Every Tempest commit is also tested against master during
+ the Ussuri cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Ussuri (or future release)
+ cloud.
+ To be on the safe side, use this tag to test the OpenStack Train release.
diff --git a/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
new file mode 100644
index 0000000..ce401ff
--- /dev/null
+++ b/releasenotes/notes/verify-tempest-command-8e88452c7a08dd77.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Remove the deprecated ``verify-tempest-config`` CLI in favour of the
+ ``tempest verify-config`` command.
+ You can use the ``tempest verify-config`` CLI to verify the tempest
+ conf file.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 57ec7e1..92df4c4 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -46,9 +46,6 @@
bug_project = 'tempest'
bug_tag = ''
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -62,7 +59,6 @@
master_doc = 'index'
# General information about the project.
-project = u'tempest Release Notes'
copyright = u'2016, tempest Developers'
# Release do not need a version number in the title, they
@@ -193,17 +189,6 @@
# -- Options for LaTeX output ---------------------------------------------
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 1d0d914..bfd8b2d 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@
:maxdepth: 1
unreleased
+ v23.0.0
+ v22.1.0
+ v22.0.0
v21.0.0
v20.0.0
v19.0.0
diff --git a/releasenotes/source/v22.0.0.rst b/releasenotes/source/v22.0.0.rst
new file mode 100644
index 0000000..519b081
--- /dev/null
+++ b/releasenotes/source/v22.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v22.0.0 Release Notes
+=====================
+
+.. release-notes:: 22.0.0 Release Notes
+ :version: 22.0.0
diff --git a/releasenotes/source/v22.1.0.rst b/releasenotes/source/v22.1.0.rst
new file mode 100644
index 0000000..6a4fd1f
--- /dev/null
+++ b/releasenotes/source/v22.1.0.rst
@@ -0,0 +1,6 @@
+=====================
+v22.1.0 Release Notes
+=====================
+
+.. release-notes:: 22.1.0 Release Notes
+ :version: 22.1.0
diff --git a/releasenotes/source/v23.0.0.rst b/releasenotes/source/v23.0.0.rst
new file mode 100644
index 0000000..7c5edf8
--- /dev/null
+++ b/releasenotes/source/v23.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v23.0.0 Release Notes
+=====================
+
+.. release-notes:: 23.0.0 Release Notes
+ :version: 23.0.0
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index e1787b6..1f7fb70 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -67,3 +67,21 @@
::
vars:
tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+ :default: ''
+
+ The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+ :default: ''
+
+ Upper constraints file to be used for stable branches up to and including stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+ :default: {}
+
+ Environment variables to set for the run-tempest task.
+
+ Environment variables set here will be combined with some more default
+ environment variables set at runtime.
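+
+ A minimal sketch of setting this variable in a job definition (the
+ environment variable names and values below are only illustrative)::
+
+     vars:
+       tempest_tox_environment:
+         OS_LOG_CAPTURE: 1
+         OS_DEBUG: 1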
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 06918b5..5867b6c 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -3,3 +3,7 @@
tox_envlist: smoke
tempest_black_regex: ''
tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 16086aa..8686f9a 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -20,6 +20,21 @@
default_concurrency: "{{ num_cores|int // 2 }}"
when: num_cores|int > 3
+- name: Override target branch
+ set_fact:
+ target_branch: "{{ zuul.override_checkout }}"
+ when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/rocky
+ set_fact:
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky"]
+
+- name: Set OS_TEST_TIMEOUT if requested
+ set_fact:
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+ when: tempest_test_timeout != ''
+
- when:
- tempest_test_blacklist is defined
block:
@@ -42,3 +57,4 @@
chdir: "{{devstack_base_dir}}/tempest"
become: true
become_user: tempest
+ environment: "{{ tempest_tox_environment }}"
diff --git a/setup.cfg b/setup.cfg
index 5c1d24c..d246c68 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,6 +6,7 @@
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
+requires-python = >=3.6
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
@@ -13,12 +14,11 @@
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: Implementation :: CPython
[files]
packages =
@@ -28,7 +28,6 @@
[entry_points]
console_scripts =
- verify-tempest-config = tempest.cmd.verify_tempest_config:main
tempest-account-generator = tempest.cmd.account_generator:main
tempest = tempest.cmd.main:main
skip-tracker = tempest.lib.cmd.skip_tracker:main
@@ -36,6 +35,7 @@
subunit-describe-calls = tempest.cmd.subunit_describe_calls:entry_point
tempest.cm =
account-generator = tempest.cmd.account_generator:TempestAccountGenerator
+ subunit-describe-calls = tempest.cmd.subunit_describe_calls:TempestSubunitDescribeCalls
init = tempest.cmd.init:TempestInit
cleanup = tempest.cmd.cleanup:TempestCleanup
list-plugins = tempest.cmd.list_plugins:TempestListPlugins
diff --git a/setup.py b/setup.py
index 566d844..f63cc23 100644
--- a/setup.py
+++ b/setup.py
@@ -16,14 +16,6 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index b1a7c52..836b975 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -31,6 +31,13 @@
class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
+ # These tests don't attempt any SSH validation nor do they use
+ # floating IPs on the instance, so all we need is a network and
+ # a subnet so the instance being migrated has a single port, but
+ # we need that to make sure we are properly updating the port
+ # host bindings during the live migration.
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(LiveMigrationTestBase, cls).skip_checks()
@@ -44,16 +51,6 @@
"Less than 2 compute nodes, skipping migration test.")
@classmethod
- def setup_credentials(cls):
- # These tests don't attempt any SSH validation nor do they use
- # floating IPs on the instance, so all we need is a network and
- # a subnet so the instance being migrated has a single port, but
- # we need that to make sure we are properly updating the port
- # host bindings during the live migration.
- cls.set_network_resources(network=True, subnet=True)
- super(LiveMigrationTestBase, cls).setup_credentials()
-
- @classmethod
def setup_clients(cls):
super(LiveMigrationTestBase, cls).setup_clients()
cls.admin_migration_client = cls.os_admin.migrations_client
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
deleted file mode 100644
index bca6a22..0000000
--- a/tempest/api/compute/admin/test_security_group_default_rules.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-
-from tempest.api.compute import base
-from tempest import config
-from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
-
-CONF = config.CONF
-
-
-class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
- max_microversion = '2.35'
-
- @classmethod
- # TODO(GMann): Once Bug# 1311500 is fixed, these test can run
- # for Neutron also.
- @testtools.skipIf(CONF.service_available.neutron,
- "Skip as this functionality is not yet "
- "implemented in Neutron. Related Bug#1311500")
- def setup_credentials(cls):
- # A network and a subnet will be created for these tests
- cls.set_network_resources(network=True, subnet=True)
- super(SecurityGroupDefaultRulesTest, cls).setup_credentials()
-
- @classmethod
- def setup_clients(cls):
- super(SecurityGroupDefaultRulesTest, cls).setup_clients()
- cls.adm_client = cls.os_admin.security_group_default_rules_client
-
- def _create_security_group_default_rules(self, ip_protocol='tcp',
- from_port=22, to_port=22,
- cidr='10.10.0.0/24'):
- # Create Security Group default rule
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port,
- cidr=cidr)['security_group_default_rule']
- self.assertEqual(ip_protocol, rule['ip_protocol'])
- self.assertEqual(from_port, rule['from_port'])
- self.assertEqual(to_port, rule['to_port'])
- self.assertEqual(cidr, rule['ip_range']['cidr'])
- return rule
-
- @decorators.idempotent_id('6d880615-eec3-4d29-97c5-7a074dde239d')
- def test_create_delete_security_group_default_rules(self):
- # Create and delete Security Group default rule
- ip_protocols = ['tcp', 'udp', 'icmp']
- for ip_protocol in ip_protocols:
- rule = self._create_security_group_default_rules(ip_protocol)
- # Delete Security Group default rule
- self.adm_client.delete_security_group_default_rule(rule['id'])
- self.assertRaises(lib_exc.NotFound,
- self.adm_client.show_security_group_default_rule,
- rule['id'])
-
- @decorators.idempotent_id('4d752e0a-33a1-4c3a-b498-ff8667ca22e5')
- def test_create_security_group_default_rule_without_cidr(self):
- ip_protocol = 'udp'
- from_port = 80
- to_port = 80
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port)['security_group_default_rule']
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- self.assertNotEqual(0, rule['id'])
- self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
-
- @decorators.idempotent_id('29f2d218-69b0-4a95-8f3d-6bd0ef732b3a')
- def test_create_security_group_default_rule_with_blank_cidr(self):
- ip_protocol = 'icmp'
- from_port = 10
- to_port = 10
- cidr = ''
- rule = self.adm_client.create_security_default_group_rule(
- ip_protocol=ip_protocol,
- from_port=from_port,
- to_port=to_port,
- cidr=cidr)['security_group_default_rule']
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- self.assertNotEqual(0, rule['id'])
- self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
-
- @decorators.idempotent_id('6e6de55e-9146-4ae0-89f2-3569586e0b9b')
- def test_security_group_default_rules_list(self):
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- cidr = '10.10.0.0/24'
- rule = self._create_security_group_default_rules(ip_protocol,
- from_port,
- to_port,
- cidr)
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- rules = (self.adm_client.list_security_group_default_rules()
- ['security_group_default_rules'])
- self.assertNotEmpty(rules)
- self.assertIn(rule, rules)
-
- @decorators.idempotent_id('15cbb349-86b4-4f71-a048-04b7ef3f150b')
- def test_default_security_group_default_rule_show(self):
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- cidr = '10.10.0.0/24'
- rule = self._create_security_group_default_rules(ip_protocol,
- from_port,
- to_port,
- cidr)
- self.addCleanup(self.adm_client.delete_security_group_default_rule,
- rule['id'])
- fetched_rule = self.adm_client.show_security_group_default_rule(
- rule['id'])['security_group_default_rule']
- self.assertEqual(rule, fetched_rule)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index aaf7a5a..8d0962d 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -39,6 +39,9 @@
"""Base test case class for all Compute API tests."""
force_tenant_isolation = False
+ # Set this to True in subclasses to create a default network. See
+ # https://bugs.launchpad.net/tempest/+bug/1844568
+ create_default_network = False
# TODO(andreaf) We should care also for the alt_manager here
# but only once client lazy load in the manager is done
@@ -49,16 +52,22 @@
super(BaseV2ComputeTest, cls).skip_checks()
if not CONF.service_available.nova:
raise cls.skipException("Nova is not available")
- cfg_min_version = CONF.compute.min_microversion
- cfg_max_version = CONF.compute.max_microversion
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- cfg_min_version,
- cfg_max_version)
+ api_version_utils.check_skip_with_microversion(
+ cls.min_microversion, cls.max_microversion,
+ CONF.compute.min_microversion, CONF.compute.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.volume_min_microversion, cls.volume_max_microversion,
+ CONF.volume.min_microversion, CONF.volume.max_microversion)
+ api_version_utils.check_skip_with_microversion(
+ cls.placement_min_microversion, cls.placement_max_microversion,
+ CONF.placement.min_microversion, CONF.placement.max_microversion)
@classmethod
def setup_credentials(cls):
- cls.set_network_resources()
+ # Setting network=True, subnet=True creates a default network
+ cls.set_network_resources(
+ network=cls.create_default_network,
+ subnet=cls.create_default_network)
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
@@ -145,6 +154,14 @@
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
+ cls.volume_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.volume_min_microversion,
+ CONF.volume.min_microversion))
+ cls.placement_request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.placement_min_microversion,
+ CONF.placement.min_microversion))
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
@@ -470,7 +487,9 @@
def setUp(self):
super(BaseV2ComputeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- compute_microversion=self.request_microversion))
+ compute_microversion=self.request_microversion,
+ volume_microversion=self.volume_request_microversion,
+ placement_microversion=self.placement_request_microversion))
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
@@ -536,11 +555,17 @@
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
- # On teardown detach the volume and wait for it to be available. This
- # is so we don't error out when trying to delete the volume during
- # teardown.
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.volumes_client, volume['id'], 'available')
+ # On teardown detach the volume and for multiattach volumes wait for
+ # the attachment to be removed. For non-multiattach volumes wait for
+ # the state of the volume to change to available. This is so we don't
+ # error out when trying to delete the volume during teardown.
+ if volume['multiattach']:
+ self.addCleanup(waiters.wait_for_volume_attachment_remove,
+ self.volumes_client, volume['id'],
+ attachment['id'])
+ else:
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume['id'], 'available')
# Ignore 404s on detach in case the server is deleted or the volume
# is already detached.
self.addCleanup(self._detach_volume, server, volume)
@@ -606,8 +631,14 @@
svcs = self.os_admin.services_client.list_services(
binary='nova-compute')['services']
- hosts = [svc['host'] for svc in svcs
- if svc['state'] == 'up' and svc['status'] == 'enabled']
+ hosts = []
+ for svc in svcs:
+ if svc['state'] == 'up' and svc['status'] == 'enabled':
+ if CONF.compute.compute_volume_common_az:
+ if svc['zone'] == CONF.compute.compute_volume_common_az:
+ hosts.append(svc['host'])
+ else:
+ hosts.append(svc['host'])
for target_host in hosts:
if source_host != target_host:
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 7cf26fb..eef2781 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -12,12 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-import testtools
+from tempest.lib import exceptions as lib_exceptions
CONF = config.CONF
@@ -51,12 +53,23 @@
# in task_state image_snapshot
self.addCleanup(waiters.wait_for_server_status, self.servers_client,
server['id'], 'ACTIVE')
- image = self.create_image_from_server(server['id'],
- wait_until='SAVING')
- self.client.delete_image(image['id'])
- msg = ('The image with ID {image_id} failed to be deleted'
- .format(image_id=image['id']))
- self.assertTrue(self.client.is_resource_deleted(image['id']), msg)
+ snapshot_name = data_utils.rand_name('test-snap')
+ try:
+ image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='SAVING')
+ self.client.delete_image(image['id'])
+ msg = ('The image with ID {image_id} failed to be deleted'
+ .format(image_id=image['id']))
+ self.assertTrue(self.client.is_resource_deleted(image['id']),
+ msg)
+ self.assertEqual(snapshot_name, image['name'])
+ except lib_exceptions.TimeoutException as ex:
+                # If the timeout is reached, we don't need to check the
+                # state, since it would no longer be 'SAVING' at least, and
+                # this test case has no scope for other state transitions.
+                # Hence, skip the test.
+ raise self.skipException("This test is skipped because " + str(ex))
@decorators.idempotent_id('aaacd1d0-55a2-4ce8-818a-b5439df8adc9')
def test_create_image_from_stopped_server(self):
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 3c152c9..b811421 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -15,7 +15,6 @@
from tempest.api.compute import base
from tempest import config
-from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -101,11 +100,5 @@
# will return 400(Bad Request) if we attempt to send a name which has
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
- body = self.compute_images_client.create_image(
- self.server_id, name=utf8_name)
- if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
- image_id = body['image_id']
- else:
- image_id = data_utils.parse_image_id(body.response['location'])
- self.addCleanup(self.client.delete_image, image_id)
+ self.create_image_from_server(self.server_id, name=utf8_name,
+ wait_until='ACTIVE')
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index 49125d1..ef69a13 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -24,18 +24,14 @@
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
max_microversion = '2.35'
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(BaseSecurityGroupsTest, cls).skip_checks()
if not utils.get_service_list()['network']:
raise cls.skipException("network service not enabled.")
- @classmethod
- def setup_credentials(cls):
- # A network and a subnet will be created for these tests
- cls.set_network_resources(network=True, subnet=True)
- super(BaseSecurityGroupsTest, cls).setup_credentials()
-
@staticmethod
def generate_random_security_group_id():
if (CONF.service_available.neutron and
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 3789aa0..df8da07 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -24,6 +24,7 @@
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils.linux import remote_client
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -78,6 +79,9 @@
validatable=True,
validation_resources=validation_resources,
wait_until='ACTIVE')
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
# NOTE(artom) self.create_test_server adds cleanups, but this is
# apparently not enough? Add cleanup here.
self.addCleanup(self.delete_server, server['id'])
@@ -150,7 +154,9 @@
def _test_create_interface_by_port_id(self, server, ifs):
network_id = ifs[0]['net_id']
- port = self.ports_client.create_port(network_id=network_id)
+ port = self.ports_client.create_port(
+ network_id=network_id,
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = port['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
iface = self.interfaces_client.create_interface(
@@ -291,7 +297,9 @@
"""
network = self.get_tenant_network()
network_id = network['id']
- port = self.ports_client.create_port(network_id=network_id)
+ port = self.ports_client.create_port(
+ network_id=network_id,
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = port['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
@@ -314,6 +322,9 @@
self.addCleanup(self.delete_server, server['id'])
for server in servers:
+ # NOTE(mgoddard): Get detailed server to ensure addresses are
+ # present in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
self._wait_for_validation(server, validation_resources)
# attach the port to the server
iface = self.interfaces_client.create_interface(
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 0263b81..a7db88a 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -26,6 +26,7 @@
class DeleteServersTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
# NOTE: Server creations of each test class should be under 10
# for preventing "Quota exceeded for instances"
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index e817587..1f7eb7b 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -175,11 +175,13 @@
# Create ports
self.port1 = self.ports_client.create_port(
network_id=net1['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port1['id'])
self.port2 = self.ports_client.create_port(
network_id=net1['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port2['id'])
@@ -356,9 +358,14 @@
validation_resources=validation_resources,
config_drive=config_drive_enabled,
name=data_utils.rand_name('device-tagging-server'),
- networks=[{'uuid': self.get_tenant_network()['id']}])
+ networks=[{'uuid': self.get_tenant_network()['id']}],
+ wait_until='ACTIVE')
self.addCleanup(self.delete_server, server['id'])
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+
# Attach tagged nic and volume
interface = self.interfaces_client.create_interface(
server['id'], net_id=net['id'],
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index bc48069..5b8e7ab 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -24,6 +24,7 @@
class ServerDiskConfigTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index b916a42..00837eb 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -19,6 +19,7 @@
class InstanceActionsTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
@@ -54,6 +55,7 @@
class InstanceActionsV221TestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
min_microversion = '2.21'
max_microversion = 'latest'
diff --git a/tempest/api/compute/servers/test_instance_actions_negative.py b/tempest/api/compute/servers/test_instance_actions_negative.py
index 1d3a790..4b5a2c3 100644
--- a/tempest/api/compute/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/servers/test_instance_actions_negative.py
@@ -20,6 +20,7 @@
class InstanceActionsNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index f0915de..b95db5c 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -20,6 +20,7 @@
class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 059454d..e176251 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -19,6 +19,7 @@
class MultipleCreateTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
def test_multiple_create(self):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 50ffb21..68e09e7 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -33,6 +33,7 @@
class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -156,22 +157,24 @@
data[20:24])[0] + 24))
def _validate_websocket_upgrade(self):
+ """Verify that the websocket upgrade was successful.
+
+        Parses the response and ensures that the required response
+        fields are present and accurate.
+ (https://tools.ietf.org/html/rfc7231#section-6.2.2)
+ """
+
self.assertTrue(
self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
- b'Protocols\r\n'),
- 'Did not get the expected 101 on the {} call: {}'.format(
- CONF.compute_feature_enabled.vnc_server_header,
+ b'Protocols'),
+ 'Incorrect HTTP return status code: {}'.format(
six.text_type(self._websocket.response)
)
)
- # Since every other server type returns Headers with different case
- # (for example 'nginx'), lowercase must be applied to eliminate issues.
- _desired_header = "server: {0}".format(
- CONF.compute_feature_enabled.vnc_server_header
- ).lower()
+ _required_header = 'upgrade: websocket'
_response = six.text_type(self._websocket.response).lower()
self.assertIn(
- _desired_header,
+ _required_header,
_response,
'Did not get the expected WebSocket HTTP Response.'
)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index d47ff51..d477be0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -343,17 +343,27 @@
def test_resize_volume_backed_server_confirm(self):
# We have to create a new server that is volume-backed since the one
# from setUp is not volume-backed.
- server = self.create_test_server(
- volume_backed=True, wait_until='ACTIVE')
+ kwargs = {'volume_backed': True,
+ 'wait_until': 'ACTIVE'}
+ if CONF.validation.run_validation:
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ kwargs.update({'validatable': True,
+ 'validation_resources': validation_resources})
+ server = self.create_test_server(**kwargs)
+
+ # NOTE(mgoddard): Get detailed server to ensure addresses are present
+ # in fixed IP case.
+ server = self.servers_client.show_server(server['id'])['server']
+
self._test_resize_server_confirm(server['id'])
+
if CONF.compute_feature_enabled.console_output:
# Now do something interactive with the guest like get its console
# output; we don't actually care about the output,
# just that it doesn't raise an error.
self.client.get_console_output(server['id'])
if CONF.validation.run_validation:
- validation_resources = self.get_class_validation_resources(
- self.os_primary)
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
self.ssh_user,
@@ -704,16 +714,13 @@
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled.')
def test_get_vnc_console(self):
- # Get the VNC console of type 'novnc' and 'xvpvnc'
- console_types = ['novnc', 'xvpvnc']
- for console_type in console_types:
- if self.is_requested_microversion_compatible('2.5'):
- body = self.client.get_vnc_console(
- self.server_id, type=console_type)['console']
- else:
- body = self.client.get_remote_console(
- self.server_id, console_type=console_type,
- protocol='vnc')['remote_console']
- self.assertEqual(console_type, body['type'])
- self.assertNotEqual('', body['url'])
- self._validate_url(body['url'])
+ if self.is_requested_microversion_compatible('2.5'):
+ body = self.client.get_vnc_console(
+ self.server_id, type='novnc')['console']
+ else:
+ body = self.client.get_remote_console(
+ self.server_id, console_type='novnc',
+ protocol='vnc')['remote_console']
+ self.assertEqual('novnc', body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index f79b05f..c936ce5 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -19,12 +19,7 @@
class ServerAddressesTestJSON(base.BaseV2ComputeTest):
-
- @classmethod
- def setup_credentials(cls):
- # This test module might use a network and a subnet
- cls.set_network_resources(network=True, subnet=True)
- super(ServerAddressesTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_addresses_negative.py b/tempest/api/compute/servers/test_server_addresses_negative.py
index b2b3cc0..f33c6d9 100644
--- a/tempest/api/compute/servers/test_server_addresses_negative.py
+++ b/tempest/api/compute/servers/test_server_addresses_negative.py
@@ -20,11 +20,7 @@
class ServerAddressesNegativeTestJSON(base.BaseV2ComputeTest):
-
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True)
- super(ServerAddressesNegativeTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 1b7cb96..4b5efaa 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -29,6 +29,7 @@
policies = affinity/anti-affinity
It also adds the tests for list and get details of server-groups
"""
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index fe95018..9d87e1c 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -18,6 +18,7 @@
class ServerMetadataTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index e6a668a..7b31ede 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -19,6 +19,7 @@
class ServerPasswordTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index 8d0a4e3..3893b01 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -26,6 +26,8 @@
min_microversion = '2.26'
max_microversion = 'latest'
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(ServerTagsTestJSON, cls).skip_checks()
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 76d65dd..3a4bd6d 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -25,6 +25,7 @@
class ServersTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def setup_clients(cls):
@@ -209,7 +210,8 @@
server['id'], 'ACTIVE')
# Check rebuild API response schema
- self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+ self.servers_client.rebuild_server(
+ server['id'], CONF.compute.certified_image_ref)
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 6cabf65..7fa30b0 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -30,6 +30,7 @@
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
+ create_default_network = True
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
@@ -554,6 +555,7 @@
class ServersNegativeTestMultiTenantJSON(base.BaseV2ComputeTest):
+ create_default_network = True
credentials = ['primary', 'alt']
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index f810ec5..dfd6ca4 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -32,11 +32,7 @@
depends_on_nova_network = True
- @classmethod
- def setup_credentials(cls):
- # This test needs a network and a subnet
- cls.set_network_resources(network=True, subnet=True)
- super(VirtualInterfacesTestJSON, cls).setup_credentials()
+ create_default_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index f83e62c..97813a5 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -28,6 +28,7 @@
class BaseAttachVolumeTest(base.BaseV2ComputeTest):
"""Base class for the attach volume tests in this module."""
+ create_default_network = True
@classmethod
def skip_checks(cls):
@@ -41,11 +42,6 @@
cls.prepare_instance_network()
super(BaseAttachVolumeTest, cls).setup_credentials()
- @classmethod
- def resource_setup(cls):
- super(BaseAttachVolumeTest, cls).resource_setup()
- cls.device = CONF.compute.volume_device_name
-
def _create_server(self):
# Start a server and wait for it to become ready
validation_resources = self.get_test_validation_resources(
@@ -84,15 +80,18 @@
# NOTE(andreaf) We need to ensure the ssh key has been
# injected in the guest before we power cycle
linux_client.validate_authentication()
+ disks_before_attach = linux_client.list_disks()
volume = self.create_volume()
# NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
- # no longer honors a user-supplied device name, in that case
- # CONF.compute.volume_device_name must be set the equal value as
- # the libvirt auto-assigned one
- attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+        # no longer honors a user-supplied device name, and there can be a
+        # mismatch between the libvirt-provided disk name and the actual
+        # disk name on the instance. Hence we no longer validate this test
+        # with the supplied device name; instead we count the number of
+        # disks before and after attach/detach to validate the test case.
+
+ attachment = self.attach_volume(server, volume)
self.servers_client.stop_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -103,9 +102,10 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- device_name_to_match = '\n' + self.device + ' '
- self.assertIn(device_name_to_match, disks)
+ disks_after_attach = linux_client.list_disks()
+ self.assertGreater(
+ len(disks_after_attach),
+ len(disks_before_attach))
self.servers_client.detach_volume(server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
@@ -120,8 +120,8 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- self.assertNotIn(device_name_to_match, disks)
+ disks_after_detach = linux_client.list_disks()
+ self.assertEqual(len(disks_before_attach), len(disks_after_detach))
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 6d08f90..9a506af 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -21,6 +21,7 @@
class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+ create_default_network = True
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 9022b2d..cb8ea11 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -44,29 +44,24 @@
@classmethod
def resource_setup(cls):
super(ListProjectsTestJSON, cls).resource_setup()
- cls.project_ids = list()
- cls.domain_id = cls.os_admin.credentials.domain_id
+ domain_id = cls.os_admin.credentials.domain_id
# Create project with domain
- cls.p1_name = data_utils.rand_name('project')
+ p1_name = data_utils.rand_name(cls.__name__)
cls.p1 = cls.projects_client.create_project(
- cls.p1_name, enabled=False,
- domain_id=cls.domain_id)['project']
+ p1_name, enabled=False, domain_id=domain_id)['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p1['id'])
- cls.project_ids.append(cls.p1['id'])
# Create default project
- p2_name = data_utils.rand_name('project')
+ p2_name = data_utils.rand_name(cls.__name__)
cls.p2 = cls.projects_client.create_project(p2_name)['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p2['id'])
- cls.project_ids.append(cls.p2['id'])
# Create a new project (p3) using p2 as parent project
- p3_name = data_utils.rand_name('project')
+ p3_name = data_utils.rand_name(cls.__name__)
cls.p3 = cls.projects_client.create_project(
p3_name, parent_id=cls.p2['id'])['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p3['id'])
- cls.project_ids.append(cls.p3['id'])
@decorators.idempotent_id('0fe7a334-675a-4509-b00e-1c4b95d5dae8')
def test_list_projects_with_enabled(self):
@@ -98,7 +93,7 @@
cls.p1 = cls.projects_client.show_project(
cls.os_primary.credentials.project_id)['project']
# Create a test project
- p2_name = data_utils.rand_name('project')
+ p2_name = data_utils.rand_name(cls.__name__)
p2_domain_id = CONF.identity.default_domain_id
cls.p2 = cls.projects_client.create_project(
p2_name, domain_id=p2_domain_id)['project']
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 0b85b19..e46145d 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -230,8 +230,14 @@
_projects = self.projects_client.list_projects()['projects']
project_list = next(x for x in _projects if x['id'] == project['id'])
- # Assert the list of fields is correct (one is enough to check here)
- self.assertSetEqual(set(fields), set(project_get.keys()))
+        # Assert the expected fields exist. More fields than expected may
+        # be in this list. This is for future proofing as keystone does not
+        # and has no plans to support microversions. Any fields that are
+        # added to the API response in the future should eventually be added
+        # to the expected fields. The expected fields must be a subset of
+        # the project_get fields (all keys in fields must exist in
+        # project_get, but project_get.keys() may have additional fields).
+ self.assertTrue(set(fields).issubset(project_get.keys()))
# Ensure the set of tags is identical and match the expected one
get_tags = set(project_get.pop("tags"))
diff --git a/tempest/api/identity/v3/test_api_discovery.py b/tempest/api/identity/v3/test_api_discovery.py
index c04c21b..e87d1cd 100644
--- a/tempest/api/identity/v3/test_api_discovery.py
+++ b/tempest/api/identity/v3/test_api_discovery.py
@@ -14,12 +14,24 @@
# under the License.
from tempest.api.identity import base
+from tempest import config
from tempest.lib import decorators
+CONF = config.CONF
+
+
class TestApiDiscovery(base.BaseIdentityV3Test):
"""Tests for API discovery features."""
+ @decorators.idempotent_id('79aec9ae-710f-4c54-a4fc-3aa25b4feac3')
+ def test_identity_v3_existence(self):
+ versions = self.non_admin_versions_client.list_versions()
+ found = any(
+ "v3" in version.get('id')
+ for version in versions['versions']['values'])
+ self.assertEqual(CONF.identity_feature_enabled.api_v3, found)
+
@decorators.idempotent_id('721f480f-35b6-46c7-846e-047e6acea0dc')
@decorators.attr(type='smoke')
def test_list_api_versions(self):
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index c938cee..5a27a43 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -57,6 +57,13 @@
self.assertIn('status', image)
self.assertEqual('queued', image['status'])
+        # NOTE: This Glance API returns different status codes depending on
+        # the image condition. In this empty-data case Glance should return
+        # 204, so here we check the status code.
+ image_file = self.client.show_image_file(image['id'])
+ self.assertEqual(0, len(image_file.data))
+ self.assertEqual(204, image_file.response.status)
+
# Now try uploading an image file
file_content = data_utils.random_bytes()
image_file = six.BytesIO(file_content)
@@ -115,17 +122,6 @@
visibility='private')
self.assertEqual('queued', image['status'])
- # NOTE: This Glance API returns different status codes for image
- # condition. In this empty data case, Glance should return 204,
- # so here should check the status code.
- image_file = self.client.show_image_file(image['id'])
- self.assertEqual(0, len(image_file.data))
- self.assertEqual(204, image_file.response.status)
-
- # Now try uploading an image file
- image_file = six.BytesIO(data_utils.random_bytes())
- self.client.store_image_file(image['id'], image_file)
-
# Update Image
new_image_name = data_utils.rand_name('new-image')
self.client.update_image(image['id'], [
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
deleted file mode 100644
index eaf477c..0000000
--- a/tempest/api/network/admin/test_agent_management.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.network import base
-from tempest.common import tempest_fixtures as fixtures
-from tempest.common import utils
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
-
-
-class AgentManagementTestJSON(base.BaseAdminNetworkTest):
-
- @classmethod
- def skip_checks(cls):
- super(AgentManagementTestJSON, cls).skip_checks()
- if not utils.is_extension_enabled('agent', 'network'):
- msg = "agent extension not enabled."
- raise cls.skipException(msg)
-
- @classmethod
- def resource_setup(cls):
- super(AgentManagementTestJSON, cls).resource_setup()
- body = cls.admin_agents_client.list_agents()
- agents = body['agents']
- cls.agent = agents[0]
-
- @decorators.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
- def test_list_agent(self):
- body = self.admin_agents_client.list_agents()
- agents = body['agents']
- # Hearthbeats must be excluded from comparison
- self.agent.pop('heartbeat_timestamp', None)
- self.agent.pop('configurations', None)
- for agent in agents:
- agent.pop('heartbeat_timestamp', None)
- agent.pop('configurations', None)
- self.assertIn(self.agent, agents)
-
- @decorators.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
- def test_show_agent(self):
- body = self.admin_agents_client.show_agent(self.agent['id'])
- agent = body['agent']
- self.assertEqual(agent['id'], self.agent['id'])
-
- @decorators.idempotent_id('371dfc5b-55b9-4cb5-ac82-c40eadaac941')
- def test_update_agent_status(self):
- origin_status = self.agent['admin_state_up']
- # Try to update the 'admin_state_up' to the original
- # one to avoid the negative effect.
- agent_status = {'admin_state_up': origin_status}
- body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=agent_status)
- updated_status = body['agent']['admin_state_up']
- self.assertEqual(origin_status, updated_status)
-
- @decorators.idempotent_id('68a94a14-1243-46e6-83bf-157627e31556')
- def test_update_agent_description(self):
- self.useFixture(fixtures.LockFixture('agent_description'))
- description = 'description for update agent.'
- agent_description = {'description': description}
- body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=agent_description)
- self.addCleanup(self._restore_agent)
- updated_description = body['agent']['description']
- self.assertEqual(updated_description, description)
-
- def _restore_agent(self):
- """Restore the agent description after update test"""
-
- description = self.agent['description'] or ''
- origin_agent = {'description': description}
- self.admin_agents_client.update_agent(agent_id=self.agent['id'],
- agent=origin_agent)
-
- @decorators.idempotent_id('b33af888-b6ac-4e68-a0ca-0444c2696cf9')
- @decorators.attr(type=['negative'])
- def test_delete_agent_negative(self):
- non_existent_id = data_utils.rand_uuid()
- self.assertRaises(
- lib_exc.NotFound,
- self.admin_agents_client.delete_agent, non_existent_id)
diff --git a/tempest/api/network/admin/test_external_networks_negative.py b/tempest/api/network/admin/test_external_networks_negative.py
index 0709d2a..da32f2d 100644
--- a/tempest/api/network/admin/test_external_networks_negative.py
+++ b/tempest/api/network/admin/test_external_networks_negative.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -50,5 +51,6 @@
# create a port which will internally create an instance-ip
self.assertRaises(lib_exc.Conflict,
self.admin_ports_client.create_port,
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=CONF.network.public_network_id,
fixed_ips=fixed_ips)
diff --git a/tempest/api/network/admin/test_ports.py b/tempest/api/network/admin/test_ports.py
index 3910fc9..289e577 100644
--- a/tempest/api/network/admin/test_ports.py
+++ b/tempest/api/network/admin/test_ports.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -41,7 +42,8 @@
@utils.services('compute')
def test_create_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id'],
- "binding:host_id": self.host_id}
+ "binding:host_id": self.host_id,
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
self.addCleanup(
@@ -54,7 +56,8 @@
@decorators.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
@utils.services('compute')
def test_update_port_binding_ext_attr(self):
- post_body = {"network_id": self.network['id']}
+ post_body = {"network_id": self.network['id'],
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
self.addCleanup(
@@ -71,7 +74,8 @@
@utils.services('compute')
def test_list_ports_binding_ext_attr(self):
# Create a new port
- post_body = {"network_id": self.network['id']}
+ post_body = {"network_id": self.network['id'],
+ "name": data_utils.rand_name(self.__class__.__name__)}
body = self.admin_ports_client.create_port(**post_body)
port = body['port']
self.addCleanup(
@@ -98,6 +102,7 @@
@decorators.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
def test_show_port_binding_ext_attr(self):
body = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=self.network['id'])
port = body['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
diff --git a/tempest/api/network/admin/test_routers_negative.py b/tempest/api/network/admin/test_routers_negative.py
index fdcc977..f605945 100644
--- a/tempest/api/network/admin/test_routers_negative.py
+++ b/tempest/api/network/admin/test_routers_negative.py
@@ -18,6 +18,7 @@
from tempest.api.network import base
from tempest.common import utils
from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -42,6 +43,7 @@
def test_router_set_gateway_used_ip_returns_409(self):
# At first create a address from public_network_id
port = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=CONF.network.public_network_id)['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_ports_client.delete_port,
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 9032fdc..b6bf369 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -156,6 +156,8 @@
@classmethod
def create_port(cls, network, **kwargs):
         """Wrapper utility that returns a test port."""
+        if 'name' not in kwargs:
+            kwargs['name'] = data_utils.rand_name(cls.__name__)
body = cls.ports_client.create_port(network_id=network['id'],
**kwargs)
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index d393207..639defb 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -17,6 +17,7 @@
from tempest.api.network import base
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -61,6 +62,7 @@
'mac_address': self.mac_address}]
body = self.ports_client.create_port(
network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
allowed_address_pairs=allowed_address_pairs)
port_id = body['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -76,7 +78,9 @@
def _update_port_with_address(self, address, mac_address=None, **kwargs):
# Create a port without allowed address pair
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
port_id = body['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_id)
@@ -107,7 +111,9 @@
@decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
# Create an ip _address and mac_address through port create
- resp = self.ports_client.create_port(network_id=self.network['id'])
+ resp = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
newportid = resp['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, newportid)
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index 8e94429..d363081 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -61,6 +61,7 @@
# Create a port with Extra DHCP Options
body = self.ports_client.create_port(
network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
extra_dhcp_opts=self.extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 9704c73..aaa5497 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -142,7 +142,9 @@
self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
# Create a port
- port = self.ports_client.create_port(network_id=self.network['id'])
+ port = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
created_port = port['port']
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
@@ -237,8 +239,10 @@
2)
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
# Create port
- body = self.ports_client.create_port(network_id=self.network['id'],
- fixed_ips=fixed_ips)
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
port = body['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port['id'])
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index bc4f41f..3af67dd 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -79,7 +79,8 @@
non_exist_net_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.ports_client.create_port,
- network_id=non_exist_net_id)
+ network_id=non_exist_net_id,
+ name=data_utils.rand_name(self.__class__.__name__))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('cf8eef21-4351-4f53-adcd-cc5cb1e76b92')
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 93a4631..10121de 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -71,7 +71,9 @@
@decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
def test_create_update_delete_port(self):
# Verify port creation
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
@@ -118,7 +120,9 @@
self._create_subnet(network, cidr=address,
mask_bits=address.prefixlen,
**allocation_pools)
- body = self.ports_client.create_port(network_id=net_id)
+ body = self.ports_client.create_port(
+ network_id=net_id,
+ name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, body['port']['id'])
port = body['port']
@@ -172,10 +176,14 @@
network = self._create_network()
self._create_subnet(network)
# Create two ports
- port_1 = self.ports_client.create_port(network_id=network['id'])
+ port_1 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_1['port']['id'])
- port_2 = self.ports_client.create_port(network_id=network['id'])
+ port_2 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_2['port']['id'])
# List ports filtered by fixed_ips
@@ -189,14 +197,14 @@
'Ports from multiple tenants are in the list resp')
port_ids = [port['id'] for port in ports]
fixed_ips = [port['fixed_ips'] for port in ports]
- port_ips = []
- for addr in fixed_ips:
- port_ips.extend([port['ip_address'] for port in addr])
-
port_net_ids = [port['network_id'] for port in ports]
self.assertIn(port_1['port']['id'], port_ids)
- self.assertIn(port_1_fixed_ip, port_ips)
self.assertIn(network['id'], port_net_ids)
+ # Check that every port has a fixed_ip that matches the query
+ for addr in fixed_ips:
+ port_ips = [port['ip_address'] for port in addr]
+ self.assertIn(port_1_fixed_ip, port_ips,
+ 'Port not matching IP filter found')
@decorators.idempotent_id('79895408-85d5-460d-94e7-9531c5fd9123')
@testtools.skipUnless(
@@ -224,13 +232,17 @@
# Create two ports
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_1}]
- port_1 = self.ports_client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips)
+ port_1 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_1['port']['id'])
fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_2}]
- port_2 = self.ports_client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips)
+ port_2 = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__),
+ fixed_ips=fixed_ips)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, port_2['port']['id'])
@@ -283,7 +295,9 @@
router = self.create_router()
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.routers_client.delete_router, router['id'])
- port = self.ports_client.create_port(network_id=network['id'])
+ port = self.ports_client.create_port(
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
# Add router interface to port created above
self.routers_client.add_router_interface(router['id'],
port_id=port['port']['id'])
@@ -359,7 +373,7 @@
self.security_groups_client.delete_security_group,
security_group['security_group']['id'])
post_body = {
- "name": data_utils.rand_name('port-'),
+ "name": data_utils.rand_name(self.__class__.__name__),
"security_groups": [security_group['security_group']['id']],
"network_id": self.network['id'],
"admin_state_up": True,
@@ -372,10 +386,11 @@
# Update the port with security groups
subnet_2 = self.create_subnet(self.network)
fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
- update_body = {"name": data_utils.rand_name('port-'),
- "admin_state_up": False,
- "fixed_ips": fixed_ip_2,
- "security_groups": security_groups_list}
+ update_body = {
+ "name": data_utils.rand_name(self.__class__.__name__),
+ "admin_state_up": False,
+ "fixed_ips": fixed_ip_2,
+ "security_groups": security_groups_list}
body = self.ports_client.update_port(port['id'], **update_body)
port_show = body['port']
# Verify the security groups and other attributes updated to port
@@ -410,13 +425,17 @@
@decorators.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
def test_create_show_delete_port_user_defined_mac(self):
# Create a port for a legal mac
- body = self.ports_client.create_port(network_id=self.network['id'])
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
old_port = body['port']
free_mac_address = old_port['mac_address']
self.ports_client.delete_port(old_port['id'])
# Create a new port with user defined mac
- body = self.ports_client.create_port(network_id=self.network['id'],
- mac_address=free_mac_address)
+ body = self.ports_client.create_port(
+ network_id=self.network['id'],
+ mac_address=free_mac_address,
+ name=data_utils.rand_name(self.__class__.__name__))
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.ports_client.delete_port, body['port']['id'])
port = body['port']
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index f223fa4..ad316d1 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -123,7 +123,8 @@
router = self.create_router()
self.addCleanup(self.delete_router, router)
port_body = self.ports_client.create_port(
- network_id=network['id'])
+ network_id=network['id'],
+ name=data_utils.rand_name(self.__class__.__name__))
# add router interface to port created above
interface = self.routers_client.add_router_interface(
router['id'],
@@ -150,6 +151,7 @@
# Don't know public_network_address, so at first create address
# from public_network and delete
port = self.admin_ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
network_id=CONF.network.public_network_id)['port']
self.admin_ports_client.delete_port(port_id=port['id'])
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index f61d9f8..1567e06 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -34,13 +34,12 @@
def setUp(self):
super(CrossdomainTest, self).setUp()
- # Turning http://.../v1/foobar into http://.../
- self.account_client.skip_path()
-
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
- resp, body = self.account_client.get("crossdomain.xml", {})
+ url = self.account_client._get_base_version_url() + "crossdomain.xml"
+ resp, body = self.account_client.raw_request(url, "GET")
+ self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index a186f9e..8e9e406 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -22,13 +22,12 @@
def setUp(self):
super(HealthcheckTest, self).setUp()
- # Turning http://.../v1/foobar into http://.../
- self.account_client.skip_path()
@decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
def test_get_healthcheck(self):
-
- resp, _ = self.account_client.get("healthcheck", {})
+ url = self.account_client._get_base_version_url() + "healthcheck"
+ resp, body = self.account_client.raw_request(url, "GET")
+ self.account_client._error_checker(resp, body)
# The target of the request is not any Swift resource. Therefore, the
# existence of response header is checked without a custom matcher.
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index affed6b..1351704 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -21,17 +21,6 @@
class BackendsCapabilitiesAdminTestsJSON(base.BaseVolumeAdminTest):
- CAPABILITIES = ('namespace',
- 'vendor_name',
- 'volume_backend_name',
- 'pool_name',
- 'driver_version',
- 'storage_protocol',
- 'display_name',
- 'description',
- 'visibility',
- 'properties')
-
@classmethod
def resource_setup(cls):
super(BackendsCapabilitiesAdminTestsJSON, cls).resource_setup()
@@ -44,12 +33,8 @@
@decorators.idempotent_id('3750af44-5ea2-4cd4-bc3e-56e7e6caf854')
def test_get_capabilities_backend(self):
# Test backend properties
- backend = self.admin_capabilities_client.show_backend_capabilities(
- self.hosts[0])
-
- # Verify getting capabilities parameters from a backend
- for key in self.CAPABILITIES:
- self.assertIn(key, backend)
+ # Check response schema
+ self.admin_capabilities_client.show_backend_capabilities(self.hosts[0])
@decorators.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
def test_compare_volume_stats_values(self):
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 9907497..8048017 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -20,18 +20,6 @@
CONF = config.CONF
-MESSAGE_KEYS = [
- 'created_at',
- 'event_id',
- 'guaranteed_until',
- 'id',
- 'message_level',
- 'request_id',
- 'resource_type',
- 'resource_uuid',
- 'user_message',
- 'links']
-
class UserMessagesTest(base.BaseVolumeAdminTest):
_api_version = 3
@@ -66,18 +54,11 @@
message_id = self._create_user_message()
self.addCleanup(self.messages_client.delete_message, message_id)
- # show message
- message = self.messages_client.show_message(message_id)['message']
- for key in MESSAGE_KEYS:
- self.assertIn(key, message.keys(), 'Missing expected key %s' % key)
+ # show message, check response schema
+ self.messages_client.show_message(message_id)
- # list messages
- messages = self.messages_client.list_messages()['messages']
- self.assertIsInstance(messages, list)
- for message in messages:
- for key in MESSAGE_KEYS:
- self.assertIn(key, message.keys(),
- 'Missing expected key %s' % key)
+ # list messages, check response schema
+ self.messages_client.list_messages()
@decorators.idempotent_id('c6eb6901-cdcc-490f-b735-4fe251842aed')
def test_delete_message(self):
diff --git a/tempest/api/volume/admin/test_volume_hosts.py b/tempest/api/volume/admin/test_volume_hosts.py
index 7e53ce8..83c27e1 100644
--- a/tempest/api/volume/admin/test_volume_hosts.py
+++ b/tempest/api/volume/admin/test_volume_hosts.py
@@ -26,13 +26,6 @@
"The count of volume hosts is < 2, "
"response of list hosts is: %s" % hosts)
- # Check elements in volume hosts list
- host_list_keys = ['service', 'host_name', 'last-update',
- 'zone', 'service-status', 'service-state']
- for host in hosts:
- for key in host_list_keys:
- self.assertIn(key, host)
-
@decorators.idempotent_id('21168d57-b373-4b71-a3ac-f2c88f0c5d31')
def test_show_host(self):
hosts = self.admin_hosts_client.list_hosts()['hosts']
@@ -53,12 +46,6 @@
"all hosts that found are: %s" % hosts)
# Check each cinder-volume host.
- host_detail_keys = ['project', 'volume_count', 'snapshot_count',
- 'host', 'total_volume_gb', 'total_snapshot_gb']
for host in c_vol_hosts:
host_details = self.admin_hosts_client.show_host(host)['host']
self.assertNotEmpty(host_details)
- for detail in host_details:
- self.assertIn('resource', detail)
- for key in host_detail_keys:
- self.assertIn(key, detail['resource'])
diff --git a/tempest/api/volume/admin/test_volume_pools.py b/tempest/api/volume/admin/test_volume_pools.py
index d389c26..744bc01 100644
--- a/tempest/api/volume/admin/test_volume_pools.py
+++ b/tempest/api/volume/admin/test_volume_pools.py
@@ -24,6 +24,7 @@
def _assert_pools(self, with_detail=False):
cinder_pools = self.admin_scheduler_stats_client.list_pools(
detail=with_detail)['pools']
+ self.assertNotEmpty(cinder_pools, "no cinder pools listed.")
self.assertIn('name', cinder_pools[0])
if with_detail:
self.assertIn(CONF.volume.vendor_name,
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 053a7d9..b073604 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -19,7 +19,6 @@
QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups',
'backup_gigabytes', 'per_volume_gigabytes']
-QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use']
class VolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest):
@@ -55,17 +54,13 @@
@decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0')
def test_list_quotas(self):
- quotas = (self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
- ['quota_set'])
- for key in QUOTA_KEYS:
- self.assertIn(key, quotas)
+ # Check response schema
+ self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
@decorators.idempotent_id('2be020a2-5fdd-423d-8d35-a7ffbc36e9f7')
def test_list_default_quotas(self):
- quotas = self.admin_quotas_client.show_default_quota_set(
- self.demo_tenant_id)['quota_set']
- for key in QUOTA_KEYS:
- self.assertIn(key, quotas)
+ # Check response schema
+ self.admin_quotas_client.show_default_quota_set(self.demo_tenant_id)
@decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6')
def test_update_all_quota_resources_for_tenant(self):
@@ -92,13 +87,9 @@
@decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed')
def test_show_quota_usage(self):
- quota_usage = self.admin_quotas_client.show_quota_set(
- self.os_admin.credentials.tenant_id,
- params={'usage': True})['quota_set']
- for key in QUOTA_KEYS:
- self.assertIn(key, quota_usage)
- for usage_key in QUOTA_USAGE_KEYS:
- self.assertIn(usage_key, quota_usage[key])
+ # Check response schema
+ self.admin_quotas_client.show_quota_set(
+ self.os_admin.credentials.tenant_id, params={'usage': True})
@decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
def test_delete_quota(self):
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 9136139..18e0b9b 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -94,7 +94,7 @@
super(VolumeRetypeTest, cls).skip_checks()
if not CONF.volume_feature_enabled.multi_backend:
- raise cls.skipException("Cinder multi-backend feature disabled.")
+ raise cls.skipException("Cinder multi-backend feature disabled")
if len(set(CONF.volume.backend_names)) < 2:
raise cls.skipException("Requires at least two different "
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index b4d48db..b602032 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -27,3 +27,15 @@
# with JSON-Schema validation. It is enough to just call
# the API here.
self.versions_client.list_versions()
+
+ @decorators.idempotent_id('7f755ae2-caa9-4049-988c-331d8f7a579f')
+ def test_show_version(self):
+ # NOTE: The version data is checked on service client side
+ # with JSON-Schema validation. So we will loop through each
+ # version and call show version.
+ versions = self.versions_client.list_versions()['versions']
+ for version_dict in versions:
+ version = version_dict['id']
+ major_version = version.split('.')[0]
+ response = self.versions_client.show_version(major_version)
+ self.assertEqual(version, response['versions'][0]['id'])
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index c85e0bc..4cdf898 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -63,8 +63,6 @@
# Accept a volume transfer by alt_tenant
body = self.alt_client.accept_volume_transfer(
transfer_id, auth_key=auth_key)['transfer']
- for key in ['id', 'name', 'links', 'volume_id']:
- self.assertIn(key, body)
waiters.wait_for_volume_resource_status(self.alt_volumes_client,
volume['id'], 'available')
accepted_volume = self.alt_volumes_client.show_volume(
@@ -95,8 +93,6 @@
# elements, and look for the created transfer.
transfers = self.client.list_volume_transfers(detail=True)['transfers']
self.assertNotEmpty(transfers)
- for transfer in transfers:
- self.assertIn('created_at', transfer)
volume_list = [transfer['volume_id'] for transfer in transfers]
self.assertIn(volume['id'], volume_list,
'Transfer not found for volume %s' % volume['id'])
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 6ce5d3e..c178272 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -50,7 +50,6 @@
'available')
return restored_volume
- @decorators.skip_because(bug="1483434")
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'ceph does not support arbitrary container names')
@decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index d5358ab..2345698 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -17,7 +17,7 @@
import operator
import random
-from six.moves.urllib import parse
+from six.moves.urllib.parse import urlparse
from testtools import matchers
from tempest.api.volume import base
@@ -333,7 +333,19 @@
# If the current iteration is from a 'next' link, check that the
# absolute url is the same as the one used for this request
if next:
- self.assertEqual(next, response.response['content-location'])
+ curr = response.response['content-location']
+ currparsed = urlparse(curr)
+ nextparsed = urlparse(next)
+ # Depending on the environment, certain fields are omitted
+ # from the url (e.g. the port). The fields to check are defined here.
+ fieldscheck = ['scheme', 'hostname', 'path', 'query', 'params',
+ 'fragment']
+ for field in fieldscheck:
+ self.assertEqual(getattr(currparsed, field),
+ getattr(nextparsed, field),
+ 'Incorrect link to next page. URLs do '
+ 'not match at %s:\n%s\n%s' % (field, curr,
+ next))
# Get next from response
next = None
@@ -352,7 +364,7 @@
# If we can follow to the next page, get params from url to make
# request in the form of a relative URL
if next:
- params = parse.urlparse(next).query
+ params = urlparse(next).query
# If cannot follow make sure it's because we have finished
else:
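A hedged illustration of the field-by-field check above (URLs invented for the example):
'http://ctrl/volume/v3/volumes?limit=2' and 'http://ctrl:80/volume/v3/volumes?limit=2'
differ as raw strings, but urlparse() reports the same scheme, hostname, path, query,
params and fragment for both, so the comparison now tolerates deployments that omit the
default port in the 'next' link:

    from six.moves.urllib.parse import urlparse
    a = urlparse('http://ctrl/volume/v3/volumes?limit=2')
    b = urlparse('http://ctrl:80/volume/v3/volumes?limit=2')
    # every compared field matches even though the raw strings differ
    assert all(getattr(a, f) == getattr(b, f)
               for f in ['scheme', 'hostname', 'path', 'query', 'params', 'fragment'])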
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 7ea0099..1535786 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -46,7 +46,6 @@
Username ``--os-username`` OS_USERNAME
Password ``--os-password`` OS_PASSWORD
Project ``--os-project-name`` OS_PROJECT_NAME
-Tenant ``--os-tenant-name`` (depr.) OS_TENANT_NAME
Domain ``--os-domain-name`` OS_DOMAIN_NAME
======== ============================ ====================
@@ -75,9 +74,6 @@
* ``--os-project-name <auth-project-name>`` (Optional) Project to request
authorization on. Defaults to env[OS_PROJECT_NAME].
-* ``--os-tenant-name <auth-tenant-name>`` (Optional, deprecated) Tenant to
- request authorization on. Defaults to env[OS_TENANT_NAME].
-
* ``--os-domain-name <auth-domain-name>`` (Optional) Domain the user and
project belong to. Defaults to env[OS_DOMAIN_NAME].
@@ -139,7 +135,7 @@
'dhcp': True}
admin_creds_dict = {'username': opts.os_username,
'password': opts.os_password}
- _project_name = opts.os_project_name or opts.os_tenant_name
+ _project_name = opts.os_project_name
if opts.identity_version == 3:
admin_creds_dict['project_name'] = _project_name
admin_creds_dict['domain_name'] = opts.os_domain_name or 'Default'
@@ -221,10 +217,6 @@
metavar='<auth-project-name>',
default=os.environ.get('OS_PROJECT_NAME'),
help='Defaults to env[OS_PROJECT_NAME].')
- parser.add_argument('--os-tenant-name',
- metavar='<auth-tenant-name>',
- default=os.environ.get('OS_TENANT_NAME'),
- help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os-domain-name',
metavar='<auth-domain-name>',
default=os.environ.get('OS_DOMAIN_NAME'),
@@ -301,10 +293,6 @@
if log_warning:
LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
"please use: 'tempest account-generator'")
- if opts.os_tenant_name:
- LOG.warning("'os-tenant-name' and 'OS_TENANT_NAME' are both "
- "deprecated, please use 'os-project-name' or "
- "'OS_PROJECT_NAME' instead")
resources = []
for count in range(opts.concurrency):
# Use N different cred_providers to obtain different sets of creds
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 645a952..c54b16b 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -135,10 +135,11 @@
self.admin_project_id = ""
self._init_admin_ids()
- self.admin_role_added = []
-
# available services
- self.project_services = cleanup_service.get_project_cleanup_services()
+ self.project_associated_services = (
+ cleanup_service.get_project_associated_cleanup_services())
+ self.resource_cleanup_services = (
+ cleanup_service.get_resource_cleanup_services())
self.global_services = cleanup_service.get_global_cleanup_services()
if parsed_args.init_saved_state:
@@ -170,7 +171,6 @@
# Loop through list of projects and clean them up.
for project in projects:
- self._add_admin(project['id'])
self._clean_project(project)
kwargs = {'data': self.dry_run_data,
@@ -183,20 +183,15 @@
svc = service(admin_mgr, **kwargs)
svc.run()
+ for service in self.resource_cleanup_services:
+ svc = service(self.admin_mgr, **kwargs)
+ svc.run()
+
if is_dry_run:
with open(DRY_RUN_JSON, 'w+') as f:
f.write(json.dumps(self.dry_run_data, sort_keys=True,
indent=2, separators=(',', ': ')))
- self._remove_admin_user_roles()
-
- def _remove_admin_user_roles(self):
- project_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for projects: %s",
- project_ids)
- for project_id in project_ids:
- self._remove_admin_role(project_id)
-
def _clean_project(self, project):
print("Cleaning project: %s " % project['name'])
is_dry_run = self.options.dry_run
@@ -209,11 +204,6 @@
project_data = dry_run_data["_projects_to_clean"][project_id] = {}
project_data['name'] = project_name
- kwargs = {"username": CONF.auth.admin_username,
- "password": CONF.auth.admin_password,
- "project_name": project['name']}
- mgr = clients.Manager(credentials=credentials.get_credentials(
- **kwargs))
kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
@@ -221,8 +211,8 @@
'is_save_state': False,
'project_id': project_id,
'got_exceptions': self.GOT_EXCEPTIONS}
- for service in self.project_services:
- svc = service(mgr, **kwargs)
+ for service in self.project_associated_services:
+ svc = service(self.admin_mgr, **kwargs)
svc.run()
def _init_admin_ids(self):
@@ -272,46 +262,6 @@
def get_description(self):
return 'Cleanup after tempest run'
- def _add_admin(self, project_id):
- rl_cl = self.admin_mgr.roles_v3_client
- needs_role = True
- roles = rl_cl.list_user_roles_on_project(project_id,
- self.admin_id)['roles']
- for role in roles:
- if role['id'] == self.admin_role_id:
- needs_role = False
- LOG.debug("User already had admin privilege for this project")
- if needs_role:
- LOG.debug("Adding admin privilege for : %s", project_id)
- rl_cl.create_user_role_on_project(project_id, self.admin_id,
- self.admin_role_id)
- self.admin_role_added.append(project_id)
-
- def _remove_admin_role(self, project_id):
- LOG.debug("Remove admin user role for projectt: %s", project_id)
- # Must initialize Admin Manager for each user role
- # Otherwise authentication exception is thrown, weird
- id_cl = clients.Manager(
- credentials.get_configured_admin_credentials()).identity_client
- if (self._project_exists(project_id)):
- try:
- id_cl.delete_role_from_user_on_project(project_id,
- self.admin_id,
- self.admin_role_id)
- except Exception as ex:
- LOG.exception("Failed removing role from project which still "
- "exists, exception: %s", ex)
-
- def _project_exists(self, project_id):
- pr_cl = self.admin_mgr.projects_client
- try:
- p = pr_cl.show_project(project_id)
- LOG.debug("Project is: %s", str(p))
- return True
- except Exception as ex:
- LOG.debug("Project no longer exists? %s", ex)
- return False
-
def _init_state(self):
print("Initializing saved state.")
data = {}
@@ -326,7 +276,11 @@
svc = service(admin_mgr, **kwargs)
svc.run()
- for service in self.project_services:
+ for service in self.project_associated_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
+
+ for service in self.resource_cleanup_services:
svc = service(admin_mgr, **kwargs)
svc.run()
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 8b625d0..5b3b72a 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -362,6 +362,25 @@
self.data['compute_quotas'] = quotas['absolute']
+class NetworkQuotaService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(NetworkQuotaService, self).__init__(kwargs)
+ self.client = manager.network_quotas_client
+
+ def delete(self):
+ client = self.client
+ try:
+ client.reset_quotas(self.project_id)
+ except Exception:
+ LOG.exception("Delete Network Quotas exception for 'project %s'.",
+ self.project_id)
+
+ def dry_run(self):
+ resp = [quota for quota in self.client.list_quotas()['quotas']
+ if quota['project_id'] == self.project_id]
+ self.data['network_quotas'] = resp
+
+
# Begin network service classes
class BaseNetworkService(BaseService):
def __init__(self, manager, **kwargs):
@@ -730,6 +749,44 @@
# begin global services
+class RegionService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(RegionService, self).__init__(kwargs)
+ self.client = manager.regions_client
+
+ def list(self):
+ client = self.client
+ regions = client.list_regions()
+ if not self.is_save_state:
+ regions = [region for region in regions['regions'] if region['id']
+ not in self.saved_state_json['regions'].keys()]
+ return regions
+ else:
+ return regions['regions']
+
+ def delete(self):
+ client = self.client
+ regions = self.list()
+ for region in regions:
+ try:
+ client.delete_region(region['id'])
+ except Exception:
+ LOG.exception("Delete Region %s exception.", region['id'])
+
+ def dry_run(self):
+ regions = self.list()
+ self.data['regions'] = {}
+ for region in regions:
+ self.data['regions'][region['id']] = region
+
+ def save_state(self):
+ regions = self.list()
+ self.data['regions'] = {}
+ for region in regions:
+ self.data['regions'][region['id']] = region
+
+
class FlavorService(BaseService):
def __init__(self, manager, **kwargs):
super(FlavorService, self).__init__(kwargs)
@@ -968,31 +1025,53 @@
self.data['domains'][domain['id']] = domain['name']
-def get_project_cleanup_services():
- project_services = []
+def get_project_associated_cleanup_services():
+ """Returns list of project service classes.
+
+ The list contains services whose resources need to be deleted prior to
+ the deletion of the project they are associated with. Most likely such
+ resources cannot be deleted once the project itself has been deleted.
+ """
+ project_associated_services = []
# TODO(gmann): Tempest should provide some plugin hook for cleanup
# script extension to plugin tests also.
if IS_NOVA:
- project_services.append(ServerService)
- project_services.append(KeyPairService)
- project_services.append(ServerGroupService)
- project_services.append(NovaQuotaService)
- if IS_NEUTRON:
- project_services.append(NetworkFloatingIpService)
- if utils.is_extension_enabled('metering', 'network'):
- project_services.append(NetworkMeteringLabelRuleService)
- project_services.append(NetworkMeteringLabelService)
- project_services.append(NetworkRouterService)
- project_services.append(NetworkPortService)
- project_services.append(NetworkSubnetService)
- project_services.append(NetworkService)
- project_services.append(NetworkSecGroupService)
- project_services.append(NetworkSubnetPoolsService)
+ project_associated_services.append(NovaQuotaService)
if IS_CINDER:
- project_services.append(SnapshotService)
- project_services.append(VolumeService)
- project_services.append(VolumeQuotaService)
- return project_services
+ project_associated_services.append(VolumeQuotaService)
+ return project_associated_services
+
+
+def get_resource_cleanup_services():
+ """Returns list of project related classes.
+
+ The list contains services whose resources are associated with a project,
+ however, their deletion is possible also after the project is deleted
+ first.
+ """
+ resource_cleanup_services = []
+ # TODO(gmann): Tempest should provide some plugin hook for cleanup
+ # script extension to plugin tests also.
+ if IS_NOVA:
+ resource_cleanup_services.append(ServerService)
+ resource_cleanup_services.append(KeyPairService)
+ resource_cleanup_services.append(ServerGroupService)
+ if IS_NEUTRON:
+ resource_cleanup_services.append(NetworkFloatingIpService)
+ if utils.is_extension_enabled('metering', 'network'):
+ resource_cleanup_services.append(NetworkMeteringLabelRuleService)
+ resource_cleanup_services.append(NetworkMeteringLabelService)
+ resource_cleanup_services.append(NetworkRouterService)
+ resource_cleanup_services.append(NetworkPortService)
+ resource_cleanup_services.append(NetworkSubnetService)
+ resource_cleanup_services.append(NetworkService)
+ resource_cleanup_services.append(NetworkSecGroupService)
+ resource_cleanup_services.append(NetworkSubnetPoolsService)
+ resource_cleanup_services.append(NetworkQuotaService)
+ if IS_CINDER:
+ resource_cleanup_services.append(SnapshotService)
+ resource_cleanup_services.append(VolumeService)
+ return resource_cleanup_services
def get_global_cleanup_services():
@@ -1005,4 +1084,5 @@
global_services.append(ProjectService)
global_services.append(DomainService)
global_services.append(RoleService)
+ global_services.append(RegionService)
return global_services
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 081fa7a..e029538 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -81,13 +81,19 @@
import os
import re
import sys
+import traceback
+from cliff.command import Command
from oslo_serialization import jsonutils as json
import subunit
import testtools
+DESCRIPTION = "Outputs all HTTP calls a given test made that were logged."
+
+
class UrlParser(testtools.TestResult):
+
uuid_re = re.compile(r'(^|[^0-9a-f])[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
'[0-9a-f]{4}-[0-9a-f]{12}([^0-9a-f]|$)')
id_re = re.compile(r'(^|[^0-9a-z])[0-9a-z]{8}[0-9a-z]{4}[0-9a-z]{4}'
@@ -241,33 +247,12 @@
class ArgumentParser(argparse.ArgumentParser):
+
def __init__(self):
- desc = "Outputs all HTTP calls a given test made that were logged."
+ desc = DESCRIPTION
super(ArgumentParser, self).__init__(description=desc)
-
self.prog = "subunit-describe-calls"
-
- self.add_argument(
- "-s", "--subunit", metavar="<subunit file>",
- nargs="?", type=argparse.FileType('rb'), default=sys.stdin,
- help="The path to the subunit output file.")
-
- self.add_argument(
- "-n", "--non-subunit-name", metavar="<non subunit name>",
- default="pythonlogging",
- help="The name used in subunit to describe the file contents.")
-
- self.add_argument(
- "-o", "--output-file", metavar="<output file>", default=None,
- help="The output file name for the json.")
-
- self.add_argument(
- "-p", "--ports", metavar="<ports file>", default=None,
- help="A JSON file describing the ports for each service.")
-
- self.add_argument(
- "-v", "--verbose", action='store_true', default=False,
- help="Add Request and Response header and body data to stdout.")
+ _parser_add_args(self)
def parse(stream, non_subunit_name, ports):
@@ -321,11 +306,63 @@
sys.stdout.write('\n')
-def entry_point():
- cl_args = ArgumentParser().parse_args()
+def entry_point(cl_args=None):
+ print('Running subunit_describe_calls ...')
+ if not cl_args:
+ print("Use of: 'subunit-describe-calls' is deprecated, "
+ "please use: 'tempest subunit-describe-calls'")
+ cl_args = ArgumentParser().parse_args()
parser = parse(cl_args.subunit, cl_args.non_subunit_name, cl_args.ports)
output(parser, cl_args.output_file, cl_args.verbose)
+def _parser_add_args(parser):
+ parser.add_argument(
+ "-s", "--subunit", metavar="<subunit file>",
+ nargs="?", type=argparse.FileType('rb'), default=sys.stdin,
+ help="The path to the subunit output file(default:stdin v1/v2 stream)"
+ )
+
+ parser.add_argument(
+ "-n", "--non-subunit-name", metavar="<non subunit name>",
+ default="pythonlogging",
+ help="The name used in subunit to describe the file contents."
+ )
+
+ parser.add_argument(
+ "-o", "--output-file", metavar="<output file>", default=None,
+ help="The output file name for the json."
+ )
+
+ parser.add_argument(
+ "-p", "--ports", metavar="<ports file>", default=None,
+ help="A JSON file describing the ports for each service."
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", action='store_true', default=False,
+ help="Add Request and Response header and body data to stdout."
+ )
+
+
+class TempestSubunitDescribeCalls(Command):
+
+ def get_parser(self, prog_name):
+ parser = super(TempestSubunitDescribeCalls, self).get_parser(prog_name)
+ _parser_add_args(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ try:
+ entry_point(parsed_args)
+
+ except Exception:
+ traceback.print_exc()
+ raise
+
+ def get_description(self):
+ return DESCRIPTION
+
+
if __name__ == "__main__":
entry_point()
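A hedged usage note (the subunit file name is made up): with the cliff command wired in
above, the tool is now also reachable through the tempest CLI, for example

    tempest subunit-describe-calls -s ./my-run.subunit -o calls.json

while the standalone 'subunit-describe-calls' entry point keeps working but prints the
deprecation warning added in entry_point().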
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index d25d3ca..8d5bdbd 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -433,11 +433,6 @@
def main(opts=None):
- print('Running config verification...')
- if opts is None:
- print("Use of: 'verify-tempest-config' is deprecated, "
- "please use: 'tempest verify-config'")
- opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
@@ -497,7 +492,3 @@
LOG.exception("Failure verifying configuration.")
traceback.print_exc()
raise
-
-
-if __name__ == "__main__":
- main()
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index d76a323..b68a879 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -73,6 +73,13 @@
msg = "'TYPE' column is required but the output doesn't have it: "
raise tempest.lib.exceptions.TempestException(msg + output)
+ def list_disks(self):
+ disks_list = self.get_disks()
+ disks_list = [line[0] for line in
+ [device_name.split()
+ for device_name in disks_list.splitlines()][1:]]
+ return disks_list
+
def get_boot_time(self):
cmd = 'cut -f1 -d. /proc/uptime'
boot_secs = self.exec_command(cmd)
@@ -148,7 +155,7 @@
self.exec_command('sudo umount %s' % mount_path)
def make_fs(self, dev_name, fs='ext4'):
- cmd_mkfs = 'sudo /usr/sbin/mke2fs -t %s /dev/%s' % (fs, dev_name)
+ cmd_mkfs = 'sudo mke2fs -t %s /dev/%s' % (fs, dev_name)
try:
self.exec_command(cmd_mkfs)
except tempest.lib.exceptions.SSHExecCommandFailed:
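A hedged illustration of the new list_disks() helper (sample lsblk-style output assumed):
given get_disks() returning

    NAME  MAJ:MIN  RM  SIZE  RO  TYPE  MOUNTPOINT
    vda   253:0     0   1G    0  disk
    vdb   253:16    0   1G    0  disk

the helper splits each line, drops the header row and keeps only the first column, so it
would return ['vda', 'vdb'].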
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 11f3bf9..b547cc6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -217,6 +217,22 @@
resource_name, resource_id, status, time.time() - start)
+def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
+ """Waits for a volume attachment to be removed from a given volume."""
+ start = int(time.time())
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ while any(attachment_id == a['attachment_id'] for a in attachments):
+ time.sleep(client.build_interval)
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Failed to remove attachment %s from volume %s '
+ 'within the required time (%s s).' %
+ (attachment_id, volume_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+ attachments = client.show_volume(volume_id)['volume']['attachments']
+ LOG.info('Attachment %s removed from volume %s after waiting for %f '
+ 'seconds', attachment_id, volume_id, time.time() - start)
+
+
def wait_for_volume_migration(client, volume_id, new_host):
"""Waits for a Volume to move to a new host."""
body = client.show_volume(volume_id)['volume']
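A hedged usage sketch for the new waiter (the client, volume and attachment objects are
assumed to come from the calling test):

    # after requesting the detach, block until the attachment disappears or the
    # client's build_timeout expires
    waiters.wait_for_volume_attachment_remove(
        self.volumes_client, volume['id'], attachment['attachment_id'])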
diff --git a/tempest/config.py b/tempest/config.py
index c50ebbe..5a2d722 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -390,7 +390,7 @@
default='placement',
help="Catalog type of the Placement service."),
cfg.StrOpt('region',
- default='RegionOne',
+ default='',
help="The placement region name to use. If empty, the value "
"of [identity]/region is used instead. If no such region "
"is found in the service catalog, the first region found "
@@ -475,7 +475,14 @@
default=False,
help="Does the test environment support block migration with "
"Cinder iSCSI volumes. Note: libvirt >= 1.2.17 is required "
- "to support this if using the libvirt compute driver."),
+ "to support this if using the libvirt compute driver.",
+ deprecated_for_removal=True,
+ deprecated_reason='This option duplicates the more generic '
+ '[compute-feature-enabled]/block_migration '
+ '_for_live_migration now that '
+ 'MIN_LIBVIRT_VERSION is >= 1.2.17 on all '
+ 'branches from stable/rocky and will be '
+ 'removed in a future release.'),
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
@@ -483,15 +490,28 @@
cfg.StrOpt('vnc_server_header',
default='WebSockify',
help='Expected VNC server name (WebSockify, nginx, etc) '
- 'in response header.'),
+ 'in response header.',
+ deprecated_for_removal=True,
+ deprecated_reason='This option will be ignored because the '
+ 'usage of different response header fields '
+ 'to accomplish the same goal (in accordance '
+ 'with RFC7231 S6.2.2) makes it obsolete.'),
cfg.BoolOpt('spice_console',
default=False,
help='Enable Spice console. This configuration value should '
- 'be same as nova.conf: spice.enabled'),
+ 'be same as nova.conf: spice.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
- 'be same as nova.conf: rdp.enabled'),
+ 'be same as nova.conf: rdp.enabled',
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is not being used "
+ "in Tempest, we can add it back when "
+ "adding the test cases."),
cfg.BoolOpt('serial_console',
default=False,
help='Enable serial console. This configuration value '
@@ -662,7 +682,7 @@
default=28,
help="The mask bits for project ipv4 subnets"),
cfg.StrOpt('project_network_v6_cidr',
- default="2003::/48",
+ default="2001:db8::/48",
help="The cidr block to allocate project ipv6 subnets from"),
cfg.IntOpt('project_network_v6_mask_bits',
default=64,
@@ -806,7 +826,7 @@
default="password",
help="Password used to authenticate to an instance."),
cfg.StrOpt('ssh_shell_prologue',
- default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
+ default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
help="Shell fragments to use before executing a command "
"when sshing to a guest."),
cfg.IntOpt('ping_size',
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 0c526fb..5cf0f8a 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -69,7 +69,7 @@
# need to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged since microversion 2.70 ***
-list_servers_details = copy.deepcopy(servers270.list_servers_detail)
+list_servers_detail = copy.deepcopy(servers270.list_servers_detail)
list_servers = copy.deepcopy(servers270.list_servers)
show_server_diagnostics = copy.deepcopy(servers270.show_server_diagnostics)
get_remote_consoles = copy.deepcopy(servers270.get_remote_consoles)
diff --git a/tempest/lib/api_schema/response/volume/capabilities.py b/tempest/lib/api_schema/response/volume/capabilities.py
new file mode 100644
index 0000000..ec60fc3
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/capabilities.py
@@ -0,0 +1,55 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+property_info = {
+ 'type': 'object',
+ 'properties': {
+ 'type': {'type': 'string'},
+ 'description': {'type': 'string'},
+ 'title': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['type', 'description', 'title']
+}
+
+show_backend_capabilities = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'pool_name': {'type': ['string', 'null']},
+ 'description': {'type': ['string', 'null']},
+ 'volume_backend_name': {'type': 'string'},
+ 'namespace': {'type': 'string',
+ 'pattern': '^OS::Storage::Capabilities::.+$'},
+ 'visibility': {'type': ['string', 'null']},
+ 'driver_version': {'type': 'string'},
+ 'vendor_name': {'type': 'string'},
+ 'properties': {
+ 'type': 'object',
+ 'properties': {
+ '^.+$': property_info
+ },
+ },
+ 'storage_protocol': {'type': 'string'},
+ 'replication_targets': {'type': 'array'},
+ 'display_name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['pool_name', 'volume_backend_name', 'namespace',
+ 'visibility', 'driver_version', 'vendor_name',
+ 'properties', 'storage_protocol', 'replication_targets',
+ 'display_name', 'description']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/hosts.py b/tempest/lib/api_schema/response/volume/hosts.py
new file mode 100644
index 0000000..ce67e9f
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/hosts.py
@@ -0,0 +1,81 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+show_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'resource': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_count': {'type': 'string'},
+ 'total_volume_gb': {'type': 'string'},
+ 'total_snapshot_gb': {'type': 'string'},
+ 'project': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'snapshot_count': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_count', 'total_volume_gb',
+ 'total_snapshot_gb', 'project',
+ 'host', 'snapshot_count'],
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['resource']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['host']
+ }
+}
+
+list_hosts = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hosts': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'service-status': {
+ 'enum': ['available', 'unavailable']},
+ 'service': {'type': 'string'},
+ 'zone': {'type': 'string'},
+ 'service-state': {
+ 'enum': ['enabled', 'disabled']},
+ 'host_name': {'type': 'string'},
+ 'last-update': parameter_types.date_time_or_null
+ },
+ 'additionalProperties': False,
+ 'required': ['service-status', 'service', 'zone',
+ 'service-state', 'host_name', 'last-update']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['hosts']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/manage_snapshot.py b/tempest/lib/api_schema/response/volume/manage_snapshot.py
new file mode 100644
index 0000000..bbb9ee2
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/manage_snapshot.py
@@ -0,0 +1,49 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+manage_snapshot = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'snapshot': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'size': {'type': 'integer'},
+ 'metadata': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ },
+ 'name': {'type': ['string', 'null']},
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'created_at': parameter_types.date_time,
+ 'description': {'type': ['string', 'null']},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'updated_at': parameter_types.date_time_or_null
+ },
+ 'additionalProperties': False,
+ 'required': ['status', 'size', 'volume_id',
+ 'created_at', 'description', 'id', 'updated_at']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['snapshot']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/messages.py b/tempest/lib/api_schema/response/volume/messages.py
new file mode 100644
index 0000000..381f542
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/messages.py
@@ -0,0 +1,64 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+delete_message = {
+ 'status_code': [204],
+}
+
+common_show_message = {
+ 'type': 'object',
+ 'properties': {
+ 'request_id': {'type': 'string'},
+ 'message_level': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'event_id': {'type': 'string'},
+ 'created_at': parameter_types.date_time,
+ 'guaranteed_until': parameter_types.date_time,
+ 'resource_uuid': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'resource_type': {'type': 'string'},
+ 'user_message': {'type': 'string'}},
+ 'additionalProperties': False,
+ 'required': ['request_id', 'message_level', 'event_id', 'created_at',
+ 'id', 'user_message'],
+}
+
+show_message = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'message': common_show_message
+ },
+ 'additionalProperties': False,
+ 'required': ['message']
+ }
+}
+
+list_messages = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'messages': {
+ 'type': 'array',
+ 'items': common_show_message
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['messages']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/quotas.py b/tempest/lib/api_schema/response/volume/quotas.py
new file mode 100644
index 0000000..4be584c
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/quotas.py
@@ -0,0 +1,92 @@
+# Copyright 2019 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+delete_quota_set = {
+ 'status_code': [200],
+}
+
+quota_usage_info = {
+ 'type': 'object',
+ 'properties': {
+ 'reserved': {'type': 'integer'},
+ 'allocated': {'type': 'integer'},
+ 'limit': {'type': 'integer'},
+ 'in_use': {'type': 'integer'}
+ },
+ 'additionalProperties': False,
+ # 'allocated' attribute is available only when nested quota is enabled.
+ 'required': ['reserved', 'limit', 'in_use'],
+}
+
+show_quota_set = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': {'type': 'integer'},
+ 'snapshots': {'type': 'integer'},
+ 'backups': {'type': 'integer'},
+ 'groups': {'type': 'integer'},
+ 'per_volume_gigabytes': {'type': 'integer'},
+ 'gigabytes': {'type': 'integer'},
+ 'backup_gigabytes': {'type': 'integer'},
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": {'type': 'integer'},
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes', 'groups'],
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
+
+update_quota_set = copy.deepcopy(show_quota_set)
+update_quota_set['response_body']['properties']['quota_set'][
+ 'required'].remove('id')
+
+show_quota_set_usage = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'volumes': quota_usage_info,
+ 'snapshots': quota_usage_info,
+ 'backups': quota_usage_info,
+ 'groups': quota_usage_info,
+ 'per_volume_gigabytes': quota_usage_info,
+ 'gigabytes': quota_usage_info,
+ 'backup_gigabytes': quota_usage_info,
+ },
+ # for volumes_{volume_type}, etc
+ "additionalProperties": quota_usage_info,
+ 'required': ['id', 'volumes', 'snapshots', 'backups',
+ 'per_volume_gigabytes', 'gigabytes',
+ 'backup_gigabytes', 'groups'],
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
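A hedged example of a body this schema would accept (all values and the volume type name
are hypothetical); the additionalProperties rule is what admits the per-volume-type
counters such as 'volumes_lvmdriver-1':

    {"quota_set": {"id": "8d9707eab1a44ff5a4e37f2c6dac8aa7", "volumes": 10,
                   "snapshots": 10, "backups": 10, "groups": 10,
                   "per_volume_gigabytes": -1, "gigabytes": 1000,
                   "backup_gigabytes": 1000, "volumes_lvmdriver-1": -1}}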
diff --git a/tempest/lib/api_schema/response/volume/transfers.py b/tempest/lib/api_schema/response/volume/transfers.py
new file mode 100644
index 0000000..d1d1b68
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/transfers.py
@@ -0,0 +1,129 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+create_volume_transfer = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': {
+ 'type': 'object',
+ 'properties': {
+ 'auth_key': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'created_at': parameter_types.date_time,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['auth_key', 'links', 'created_at',
+ 'volume_id', 'id', 'name']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
+
+common_show_volume_transfer = {
+ 'type': 'object',
+ 'properties': {
+ 'links': parameter_types.links,
+ 'created_at': parameter_types.date_time,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['links', 'created_at', 'volume_id', 'id', 'name']
+}
+
+show_volume_transfer = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': common_show_volume_transfer
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
+
+list_volume_transfers_no_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'links': parameter_types.links,
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['volume_id', 'id', 'links', 'name']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfers'],
+ }
+}
+
+list_volume_transfers_with_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfers': {
+ 'type': 'array',
+ 'items': common_show_volume_transfer
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfers'],
+ }
+}
+
+delete_volume_transfer = {'status_code': [202]}
+
+accept_volume_transfer = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'transfer': {
+ 'type': 'object',
+ 'properties': {
+ 'links': parameter_types.links,
+ 'volume_id': {'type': 'string', 'format': 'uuid'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': ['string', 'null']}
+ },
+ 'additionalProperties': False,
+ 'required': ['links', 'volume_id', 'id', 'name']
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['transfer']
+ }
+}
diff --git a/tempest/lib/api_schema/response/volume/versions.py b/tempest/lib/api_schema/response/volume/versions.py
old mode 100644
new mode 100755
diff --git a/tempest/lib/base.py b/tempest/lib/base.py
index 3be55c0..74ae77c 100644
--- a/tempest/lib/base.py
+++ b/tempest/lib/base.py
@@ -14,11 +14,29 @@
# under the License.
import os
+import sys
import fixtures
+import pkg_resources
import testtools
+def _handle_skip_exception():
+ try:
+ stestr_version = pkg_resources.parse_version(
+ pkg_resources.get_distribution("stestr").version)
+ stestr_min = pkg_resources.parse_version('2.5.0')
+ new_stestr = (stestr_version >= stestr_min)
+ import unittest
+ import unittest2
+ if sys.version_info >= (3, 5) and new_stestr:
+ testtools.TestCase.skipException = unittest.case.SkipTest
+ else:
+ testtools.TestCase.skipException = unittest2.case.SkipTest
+ except Exception:
+ pass
+
+
class BaseTestCase(testtools.testcase.WithAttributes, testtools.TestCase):
setUpClassCalled = False
@@ -33,6 +51,18 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
+ # TODO(gmann): cls.handle_skip_exception is really a workaround for
+ # a testtools bug - https://github.com/testing-cabal/testtools/issues/272
+ # stestr, which Tempest uses internally to run the tests, switches to a
+ # customized test runner (based on stdlib unittest) for >=py3.5,
+ # otherwise it uses testtools.run - https://github.com/mtreinish/stestr/pull/265
+ # These two test runners are not compatible in their skip exception
+ # handling (because of unittest2): testtools.run treats unittest.SkipTest
+ # as an error, and stdlib unittest treats the unittest2.case.SkipTest
+ # raised by testtools.TestCase.skipException as an error.
+ # The workaround below can be removed once testtools fixes issue #272.
+ cls.orig_skip_exception = testtools.TestCase.skipException
+ _handle_skip_exception()
@classmethod
def tearDownClass(cls):
@@ -40,6 +70,7 @@
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
+ testtools.TestCase.skipException = self.orig_skip_exception
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's "
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index d29362d..80dbc1d 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -32,6 +32,10 @@
# (min_microversion, max_microversion) on each test class if necessary.
min_microversion = None
max_microversion = LATEST_MICROVERSION
+ volume_min_microversion = None
+ volume_max_microversion = LATEST_MICROVERSION
+ placement_min_microversion = None
+ placement_max_microversion = LATEST_MICROVERSION
def check_skip_with_microversion(test_min_version, test_max_version,
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index f076727..431a0a0 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -547,24 +547,17 @@
req_url, req_headers, req_body = self.auth_provider.auth_request(
method, url, headers, body, self.filters)
- # Do the actual request, and time it
- start = time.time()
- self._log_request_start(method, req_url)
resp, resp_body = self.raw_request(
req_url, method, headers=req_headers, body=req_body,
chunked=chunked
)
- end = time.time()
- self._log_request(method, req_url, resp, secs=(end - start),
- req_headers=req_headers, req_body=req_body,
- resp_body=resp_body)
-
# Verify HTTP response codes
self.response_checker(method, resp, resp_body)
return resp, resp_body
- def raw_request(self, url, method, headers=None, body=None, chunked=False):
+ def raw_request(self, url, method, headers=None, body=None, chunked=False,
+ log_req_body=None):
"""Send a raw HTTP request without the keystone catalog or auth
This method sends a HTTP request in the same manner as the request()
@@ -580,14 +573,29 @@
explicitly requires no headers use an empty dict.
:param str body: Body to send with the request
:param bool chunked: sends the body with chunked encoding
+ :param str log_req_body: Whether to log the request body or not.
+ Defaults to None, which means the request body
+ is safe to log; otherwise pass any string you
+ want logged in place of the request body,
+ for example: '<omitted>'
:rtype: tuple
:return: a tuple with the first entry containing the response headers
and the second the response body
"""
if headers is None:
headers = self.get_headers()
- return self.http_obj.request(url, method, headers=headers,
- body=body, chunked=chunked)
+ # Do the actual request, and time it
+ start = time.time()
+ self._log_request_start(method, url)
+ resp, resp_body = self.http_obj.request(
+ url, method, headers=headers,
+ body=body, chunked=chunked)
+ end = time.time()
+ req_body = body if log_req_body is None else log_req_body
+ self._log_request(method, url, resp, secs=(end - start),
+ req_headers=headers, req_body=req_body,
+ resp_body=resp_body)
+ return resp, resp_body
def request(self, method, url, extra_headers=False, headers=None,
body=None, chunked=False):
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index d4ec6ad..3a05f27 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -75,6 +75,11 @@
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
self.proxy_client = proxy_client
+ if (self.proxy_client and self.proxy_client.host == self.host and
+ self.proxy_client.port == self.port and
+ self.proxy_client.username == self.username):
+ raise exceptions.SSHClientProxyClientLoop(
+ host=self.host, port=self.port, username=self.username)
self._proxy_conn = None
def _get_ssh_connection(self, sleep=1.5, backoff=1):
@@ -114,8 +119,10 @@
ssh.close()
if self._is_timed_out(_start_time):
LOG.exception("Failed to establish authenticated ssh"
- " connection to %s@%s after %d attempts",
- self.username, self.host, attempts)
+ " connection to %s@%s after %d attempts. "
+ "Proxy client: %s",
+ self.username, self.host, attempts,
+ self._get_proxy_client_info())
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
@@ -196,11 +203,13 @@
exit_status = channel.recv_exit_status()
- if 0 != exit_status:
- raise exceptions.SSHExecCommandFailed(
- command=cmd, exit_status=exit_status,
- stderr=err_data, stdout=out_data)
- return out_data
+ ssh.close()
+
+ if 0 != exit_status:
+ raise exceptions.SSHExecCommandFailed(
+ command=cmd, exit_status=exit_status,
+ stderr=err_data, stdout=out_data)
+ return out_data
def test_connection_auth(self):
"""Raises an exception when we can not connect to server via ssh."""
@@ -217,3 +226,13 @@
cmd = 'nc %s %s' % (self.host, self.port)
chan.exec_command(cmd)
return chan
+
+ def _get_proxy_client_info(self):
+ if not self.proxy_client:
+ return 'no proxy client'
+ nested_pclient = self.proxy_client._get_proxy_client_info()
+ return ('%(username)s@%(host)s:%(port)s, nested proxy client: '
+ '%(nested_pclient)s' % {'username': self.proxy_client.username,
+ 'host': self.proxy_client.host,
+ 'port': self.proxy_client.port,
+ 'nested_pclient': nested_pclient})
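A hedged sketch of the new guard (host and credentials are made up for the example):
a Client whose proxy_client points back at the very same host, port and username now
fails fast instead of tunnelling through itself:

    from tempest.lib.common import ssh
    from tempest.lib import exceptions

    proxy = ssh.Client('192.0.2.10', 'cirros', password='secret')
    try:
        ssh.Client('192.0.2.10', 'cirros', password='secret', proxy_client=proxy)
    except exceptions.SSHClientProxyClientLoop:
        pass  # raised by the constructor because the proxy would connect to itself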
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 13af890..84b7ee6 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -251,6 +251,11 @@
"stdout:\n%(stdout)s")
+class SSHClientProxyClientLoop(TempestException):
+ message = ("SSH client proxy client has same host: %(host)s, port: "
+ "%(port)s and username: %(username)s as parent")
+
+
class UnknownServiceClient(TempestException):
message = "Service clients named %(services)s are not known"
@@ -280,3 +285,12 @@
class InvalidParam(TempestException):
message = ("Invalid Parameter passed: %(invalid_param)s")
+
+
+class ConsistencyGroupException(TempestException):
+ message = "Consistency group %(cg_id)s failed and is in ERROR status"
+
+
+class ConsistencyGroupSnapshotException(TempestException):
+ message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
+ "in ERROR status")
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index f027772..a687137 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -435,7 +435,12 @@
return rest_client.ResponseBody(resp, body)
def update_attached_volume(self, server_id, attachment_id, **kwargs):
- """Swaps a volume attached to an instance for another volume"""
+ """Swaps a volume attached to an instance for another volume
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/compute/#update-a-volume-attachment
+ """
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
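
As the new docstring indicates, the only body parameter the swap operation needs is the replacement volume's ID. A hedged sketch of a caller (the helper and its arguments are illustrative, not part of this change):

def swap_attached_volume(servers_client, server_id, attachment_id,
                         new_volume_id):
    # kwargs are wrapped into the 'volumeAttachment' body by the client, so
    # passing volumeId=<new volume> performs the swap described in the
    # API reference linked above.
    return servers_client.update_attached_volume(
        server_id, attachment_id, volumeId=new_volume_id)
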
diff --git a/tempest/lib/services/compute/versions_client.py b/tempest/lib/services/compute/versions_client.py
index 8fbb136..c6e1783 100644
--- a/tempest/lib/services/compute/versions_client.py
+++ b/tempest/lib/services/compute/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import versions as schema
@@ -26,11 +24,7 @@
def list_versions(self):
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
body = json.loads(body)
diff --git a/tempest/lib/services/identity/v2/token_client.py b/tempest/lib/services/identity/v2/token_client.py
index 458c862..9f10f58 100644
--- a/tempest/lib/services/identity/v2/token_client.py
+++ b/tempest/lib/services/identity/v2/token_client.py
@@ -105,9 +105,8 @@
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
- headers=headers, body=body)
- self._log_request(method, url, resp, req_headers=headers,
- req_body='<omitted>', resp_body=resp_body)
+ headers=headers, body=body,
+ log_req_body='<omitted>')
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index 43a9020..f9356be 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -42,8 +42,12 @@
return rest_client.ResponseBody(resp, body)
def list_roles(self, **params):
- """Get the list of Roles."""
+ """Get the list of Roles.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-roles
+ """
url = 'roles'
if params:
url += '?%s' % urllib.urlencode(params)
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index d591f03..6956297 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -160,10 +160,8 @@
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
- headers=headers, body=body)
- self._log_request(method, url, resp, req_headers=headers,
- req_body='<omitted>', resp_body=resp_body)
-
+ headers=headers, body=body,
+ log_req_body='<omitted>')
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
diff --git a/tempest/lib/services/identity/v3/versions_client.py b/tempest/lib/services/identity/v3/versions_client.py
index 441ee0d..f3a8986 100644
--- a/tempest/lib/services/identity/v3/versions_client.py
+++ b/tempest/lib/services/identity/v3/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
@@ -26,11 +24,7 @@
"""List API versions"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
self.expected_success(300, resp.status)
diff --git a/tempest/lib/services/image/v2/versions_client.py b/tempest/lib/services/image/v2/versions_client.py
index 1adc466..1b7f806 100644
--- a/tempest/lib/services/image/v2/versions_client.py
+++ b/tempest/lib/services/image/v2/versions_client.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
@@ -26,11 +24,7 @@
"""List API versions"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
self.expected_success(300, resp.status)
diff --git a/tempest/lib/services/network/extensions_client.py b/tempest/lib/services/network/extensions_client.py
index 3910c84..4701cb0 100644
--- a/tempest/lib/services/network/extensions_client.py
+++ b/tempest/lib/services/network/extensions_client.py
@@ -16,9 +16,21 @@
class ExtensionsClient(base.BaseNetworkClient):
def show_extension(self, ext_alias, **fields):
+ """Show extension details.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-extension-details
+ """
uri = '/extensions/%s' % ext_alias
return self.show_resource(uri, **fields)
def list_extensions(self, **filters):
+ """List extensions.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-extensions
+ """
uri = '/extensions'
return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/object_storage/capabilities_client.py b/tempest/lib/services/object_storage/capabilities_client.py
index d31bbc2..f08bd9a 100644
--- a/tempest/lib/services/object_storage/capabilities_client.py
+++ b/tempest/lib/services/object_storage/capabilities_client.py
@@ -21,9 +21,10 @@
class CapabilitiesClient(rest_client.RestClient):
def list_capabilities(self):
- self.skip_path()
try:
- resp, body = self.get('info')
+ url = self._get_base_version_url() + 'info'
+ resp, body = self.raw_request(url, 'GET')
+ self._error_checker(resp, body)
finally:
self.reset_path()
body = json.loads(body)
diff --git a/tempest/lib/services/volume/v1/backups_client.py b/tempest/lib/services/volume/v1/backups_client.py
index edc5dda..2289253 100644
--- a/tempest/lib/services/volume/v1/backups_client.py
+++ b/tempest/lib/services/volume/v1/backups_client.py
@@ -83,6 +83,9 @@
def import_backup(self, **kwargs):
"""Import backup metadata record."""
+        # TODO(linanbj): Currently the api-site doesn't contain this API
+        # description. Once the api-site is fixed, update this docstring
+        # with a link to the api-site reference.
post_body = json.dumps({'backup-record': kwargs})
resp, body = self.post("backups/import_record", post_body)
body = json.loads(body)
diff --git a/tempest/lib/services/volume/v3/capabilities_client.py b/tempest/lib/services/volume/v3/capabilities_client.py
index ac2cd02..dc850a8 100644
--- a/tempest/lib/services/volume/v3/capabilities_client.py
+++ b/tempest/lib/services/volume/v3/capabilities_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import capabilities as schema
from tempest.lib.common import rest_client
@@ -30,5 +31,5 @@
url = 'capabilities/%s' % host
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_backend_capabilities, resp, body)
return rest_client.ResponseBody(resp, body)
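
The validate_response() calls introduced throughout this change all consume schema entries of the same shape: an allowed status code list plus a JSON Schema for the body. A minimal, made-up example of such an entry (not one of the real volume schemas):

show_widget = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'widget': {'type': 'object'}
        },
        'required': ['widget']
    }
}

validate_response() then covers both the status assertion previously done by expected_success() and a JSON Schema check of the response body.
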
diff --git a/tempest/lib/services/volume/v3/hosts_client.py b/tempest/lib/services/volume/v3/hosts_client.py
index c95d2d2..019a852 100644
--- a/tempest/lib/services/volume/v3/hosts_client.py
+++ b/tempest/lib/services/volume/v3/hosts_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import hosts as schema
from tempest.lib.common import rest_client
@@ -35,13 +36,13 @@
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_hosts, resp, body)
return rest_client.ResponseBody(resp, body)
def show_host(self, host_name):
"""Show host details."""
url = 'os-hosts/%s' % host_name
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_host, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/messages_client.py b/tempest/lib/services/volume/v3/messages_client.py
index 47538cd..b770fac 100644
--- a/tempest/lib/services/volume/v3/messages_client.py
+++ b/tempest/lib/services/volume/v3/messages_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import messages as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
@@ -28,7 +29,7 @@
url = 'messages/%s' % str(message_id)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_message, resp, body)
return rest_client.ResponseBody(resp, body)
def list_messages(self):
@@ -36,14 +37,14 @@
url = 'messages'
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_messages, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_message(self, message_id):
"""Delete a single message."""
url = 'messages/%s' % str(message_id)
resp, body = self.delete(url)
- self.expected_success(204, resp.status)
+ self.validate_response(schema.delete_message, resp, body)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
diff --git a/tempest/lib/services/volume/v3/quotas_client.py b/tempest/lib/services/volume/v3/quotas_client.py
index 4d680c1..5b1a52c 100644
--- a/tempest/lib/services/volume/v3/quotas_client.py
+++ b/tempest/lib/services/volume/v3/quotas_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import quotas as schema
from tempest.lib.common import rest_client
@@ -27,8 +28,8 @@
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ self.validate_response(schema.show_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def show_quota_set(self, tenant_id, params=None):
@@ -39,8 +40,11 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ if params and params.get('usage', False):
+ self.validate_response(schema.show_quota_set_usage, resp, body)
+ else:
+ self.validate_response(schema.show_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def update_quota_set(self, tenant_id, **kwargs):
@@ -52,12 +56,12 @@
"""
put_body = jsonutils.dumps({'quota_set': kwargs})
resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
- self.expected_success(200, resp.status)
body = jsonutils.loads(body)
+ self.validate_response(schema.update_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.delete_quota_set, resp, body)
return rest_client.ResponseBody(resp, body)
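
Because the response body changes shape when usage details are requested, the client now has to pick the schema based on the query string. A short sketch of both call paths (the helper, client instance and tenant ID are placeholders):

def check_quota_schemas(quotas_client, tenant_id):
    # Nested in_use/reserved/limit dicts -> validated with show_quota_set_usage.
    with_usage = quotas_client.show_quota_set(tenant_id,
                                              params={'usage': True})
    # Flat integer limits -> validated with show_quota_set.
    plain = quotas_client.show_quota_set(tenant_id)
    return with_usage, plain
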
diff --git a/tempest/lib/services/volume/v3/snapshot_manage_client.py b/tempest/lib/services/volume/v3/snapshot_manage_client.py
index 43fd328..77920e4 100644
--- a/tempest/lib/services/volume/v3/snapshot_manage_client.py
+++ b/tempest/lib/services/volume/v3/snapshot_manage_client.py
@@ -15,6 +15,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import manage_snapshot as schema
from tempest.lib.common import rest_client
@@ -22,10 +23,15 @@
"""Snapshot manage client."""
def manage_snapshot(self, **kwargs):
- """Manage a snapshot."""
+ """Manage a snapshot.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#manage-an-existing-snapshot
+ """
post_body = json.dumps({'snapshot': kwargs})
url = 'os-snapshot-manage'
resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
body = json.loads(body)
+ self.validate_response(schema.manage_snapshot, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/transfers_client.py b/tempest/lib/services/volume/v3/transfers_client.py
index 39e3475..f572f95 100644
--- a/tempest/lib/services/volume/v3/transfers_client.py
+++ b/tempest/lib/services/volume/v3/transfers_client.py
@@ -16,6 +16,7 @@
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
+from tempest.lib.api_schema.response.volume import transfers as schema
from tempest.lib.common import rest_client
@@ -32,7 +33,7 @@
post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.create_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_transfer(self, transfer_id):
@@ -40,7 +41,7 @@
url = "os-volume-transfer/%s" % transfer_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_transfers(self, detail=False, **params):
@@ -52,19 +53,21 @@
https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
"""
url = 'os-volume-transfer'
+ schema_list_transfers = schema.list_volume_transfers_no_detail
if detail:
url += '/detail'
+ schema_list_transfers = schema.list_volume_transfers_with_detail
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema_list_transfers, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_volume_transfer(self, transfer_id):
"""Delete a volume transfer."""
resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
def accept_volume_transfer(self, transfer_id, **kwargs):
@@ -78,5 +81,5 @@
post_body = json.dumps({'accept': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.accept_volume_transfer, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/versions_client.py b/tempest/lib/services/volume/v3/versions_client.py
index fc8e92f..aa6c867 100644
--- a/tempest/lib/services/volume/v3/versions_client.py
+++ b/tempest/lib/services/volume/v3/versions_client.py
@@ -12,8 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import time
+from six.moves.urllib.parse import urljoin
from oslo_serialization import jsonutils as json
@@ -33,14 +32,10 @@
"""
version_url = self._get_base_version_url()
- start = time.time()
resp, body = self.raw_request(version_url, 'GET')
- end = time.time()
# NOTE: We need a raw_request() here instead of request() call because
# "list API versions" API doesn't require an authentication and we can
# skip it with raw_request() call.
- self._log_request('GET', version_url, resp, secs=(end - start),
- resp_body=body)
self._error_checker(resp, body)
body = json.loads(body)
@@ -50,13 +45,18 @@
def show_version(self, version):
"""Show API version details
+        Use raw_request in order to have access to the endpoint minus
+        version and project, so that only the version needs to be added back.
+
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#show-api-v3-details
"""
- version_url = os.path.join(self._get_base_version_url(), version)
- resp, body = self.get(version_url)
+ version_url = urljoin(self._get_base_version_url(), version + '/')
+ resp, body = self.raw_request(version_url, 'GET',
+ {'X-Auth-Token': self.token})
+ self._error_checker(resp, body)
body = json.loads(body)
self.validate_response(schema.volume_api_version_details, resp, body)
return rest_client.ResponseBody(resp, body)
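
Switching from os.path.join to urljoin matters because urljoin resolves the version segment against the base endpoint the way an HTTP client would, and the trailing slash controls whether the last path segment is kept or replaced. A quick illustration with an invented endpoint:

from six.moves.urllib.parse import urljoin

# The base URL returned by _get_base_version_url() is assumed to end with '/'.
print(urljoin('https://cinder.example.com/volume/', 'v3' + '/'))
# -> https://cinder.example.com/volume/v3/

# Without the trailing slash on the base, the last segment would be replaced
# rather than appended:
print(urljoin('https://cinder.example.com/volume', 'v3/'))
# -> https://cinder.example.com/v3/
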
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index e60382a..4fb6d2e 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -142,7 +142,12 @@
return rest_client.ResponseBody(resp, body)
def upload_volume(self, volume_id, **kwargs):
- """Uploads a volume in Glance."""
+ """Uploads a volume in Glance.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#upload-volume-to-image
+ """
post_body = json.dumps({'os-volume_upload_image': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
@@ -248,7 +253,12 @@
return rest_client.ResponseBody(resp, body)
def update_volume_readonly(self, volume_id, **kwargs):
- """Update the Specified Volume readonly."""
+ """Update the Specified Volume readonly.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#updates-volume-read-only-access-mode-flag
+ """
post_body = json.dumps({'os-update_readonly_flag': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
@@ -339,7 +349,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://docs.openstack.org/api-ref/block-storage/v3/index.html#force-delete-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#force-detach-a-volume
"""
post_body = json.dumps({'os-force_detach': kwargs})
url = 'volumes/%s/action' % volume_id
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 1252f09..cb7acbf 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -634,8 +634,7 @@
def nova_volume_attach(self, server, volume_to_attach):
volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
- % CONF.compute.volume_device_name)['volumeAttachment']
+ server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
@@ -811,6 +810,42 @@
server_details = cls.os_admin.servers_client.show_server(server_id)
return server_details['server']['OS-EXT-SRV-ATTR:host']
+ def _get_bdm(self, source_id, source_type, delete_on_termination=False):
+ bd_map_v2 = [{
+ 'uuid': source_id,
+ 'source_type': source_type,
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': delete_on_termination}]
+ return {'block_device_mapping_v2': bd_map_v2}
+
+ def boot_instance_from_resource(self, source_id,
+ source_type,
+ keypair=None,
+ security_group=None,
+ delete_on_termination=False,
+ name=None):
+ create_kwargs = dict()
+ if keypair:
+ create_kwargs['key_name'] = keypair['name']
+ if security_group:
+ create_kwargs['security_groups'] = [
+ {'name': security_group['name']}]
+ create_kwargs.update(self._get_bdm(
+ source_id,
+ source_type,
+ delete_on_termination=delete_on_termination))
+ if name:
+ create_kwargs['name'] = name
+
+ return self.create_server(image_id='', **create_kwargs)
+
+ def create_volume_from_image(self):
+ img_uuid = CONF.compute.image_ref
+ vol_name = data_utils.rand_name(
+ self.__class__.__name__ + '-volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index cee543b..4cd860d 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -61,7 +61,11 @@
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
- self.assertEqual(volume, got_volume)
+ # Exclude updated_at because of bug 1838202.
+ excluded_keys = ['updated_at']
+ self.assertThat(
+ volume, custom_matchers.MatchesDictExceptForKeys(
+ got_volume, excluded_keys=excluded_keys))
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
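
What the custom matcher effectively asserts is plain dict equality once the excluded keys are dropped on both sides; a standalone illustration with invented volume data:

def strip(d, excluded=('updated_at',)):
    # Drop the keys that are allowed to differ between the two views.
    return {k: v for k, v in d.items() if k not in excluded}

expected = {'id': 'vol-1', 'status': 'available', 'updated_at': 'T1'}
observed = {'id': 'vol-1', 'status': 'available', 'updated_at': 'T2'}
assert strip(expected) == strip(observed)   # passes despite updated_at drift
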
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f03e9de..b1919d4 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -249,12 +249,16 @@
block_migration = (CONF.compute_feature_enabled.
block_migration_for_live_migration)
+ old_host = self.get_host_for_server(server['id'])
self.admin_servers_client.live_migrate_server(
server['id'], host=None, block_migration=block_migration,
disk_over_commit=False)
waiters.wait_for_server_status(self.servers_client,
server['id'], 'ACTIVE')
+ new_host = self.get_host_for_server(server['id'])
+ self.assertNotEqual(old_host, new_host, 'Server did not migrate')
+
self._wait_server_status_and_check_network_connectivity(
server, keypair, floating_ip)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index af79ea0..c3b3670 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -55,20 +55,24 @@
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- def _wait_for_volume_available_on_the_system(self, ip_address,
- private_key):
+ def _attached_volume_name(
+ self, disks_list_before_attach, ip_address, private_key):
ssh = self.get_remote_client(ip_address, private_key=private_key)
- def _func():
- disks = ssh.get_disks()
- LOG.debug("Disks: %s", disks)
- return CONF.compute.volume_device_name in disks
+ def _wait_for_volume_available_on_system():
+ disks_list_after_attach = ssh.list_disks()
+ return len(disks_list_after_attach) > len(disks_list_before_attach)
- if not test_utils.call_until_true(_func,
+ if not test_utils.call_until_true(_wait_for_volume_available_on_system,
CONF.compute.build_timeout,
CONF.compute.build_interval):
raise lib_exc.TimeoutException
+ disks_list_after_attach = ssh.list_disks()
+ volume_name = [item for item in disks_list_after_attach
+ if item not in disks_list_before_attach][0]
+ return volume_name
+
@decorators.attr(type='slow')
@decorators.idempotent_id('10fd234a-515c-41e5-b092-8323060598c5')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
@@ -91,15 +95,16 @@
ip_for_server = self.get_server_ip(server)
# Make sure the machine ssh-able before attaching the volume
- self.get_remote_client(ip_for_server,
- private_key=keypair['private_key'],
- server=server)
-
+ linux_client = self.get_remote_client(
+ ip_for_server, private_key=keypair['private_key'],
+ server=server)
+ disks_list_before_attach = linux_client.list_disks()
self.nova_volume_attach(server, volume)
- self._wait_for_volume_available_on_the_system(ip_for_server,
- keypair['private_key'])
+ volume_device_name = self._attached_volume_name(
+ disks_list_before_attach, ip_for_server, keypair['private_key'])
+
timestamp = self.create_timestamp(ip_for_server,
- CONF.compute.volume_device_name,
+ volume_device_name,
private_key=keypair['private_key'],
server=server)
self.nova_volume_detach(server, volume)
@@ -126,18 +131,19 @@
# Make sure the machine ssh-able before attaching the volume
        # Just checking that a live machine is responding
        # for device attach/detach as expected
- self.get_remote_client(ip_for_snapshot,
- private_key=keypair['private_key'],
- server=server_from_snapshot)
+ linux_client = self.get_remote_client(
+ ip_for_snapshot, private_key=keypair['private_key'],
+ server=server_from_snapshot)
+ disks_list_before_attach = linux_client.list_disks()
# attach volume2 to instance2
self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
- self._wait_for_volume_available_on_the_system(ip_for_snapshot,
- keypair['private_key'])
+ volume_device_name = self._attached_volume_name(
+ disks_list_before_attach, ip_for_snapshot, keypair['private_key'])
# check the existence of the timestamp file in the volume2
timestamp2 = self.get_timestamp(ip_for_snapshot,
- CONF.compute.volume_device_name,
+ volume_device_name,
private_key=keypair['private_key'],
server=server_from_snapshot)
self.assertEqual(timestamp, timestamp2)
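
The renamed helper simply diffs the guest's disk list before and after the attach and takes whatever device appeared; the logic boils down to the following (device names are invented):

disks_before_attach = ['vda']
disks_after_attach = ['vda', 'vdb']

new_disks = [d for d in disks_after_attach if d not in disks_before_attach]
volume_device_name = new_disks[0]
print(volume_device_name)   # vdb
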
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 6ed7e30..0782389 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -31,42 +31,6 @@
# breathing room to get through deletes in the time allotted.
TIMEOUT_SCALING_FACTOR = 2
- def _create_volume_from_image(self):
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _get_bdm(self, source_id, source_type, delete_on_termination=False):
- bd_map_v2 = [{
- 'uuid': source_id,
- 'source_type': source_type,
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': delete_on_termination}]
- return {'block_device_mapping_v2': bd_map_v2}
-
- def _boot_instance_from_resource(self, source_id,
- source_type,
- keypair=None,
- security_group=None,
- delete_on_termination=False,
- name=None):
- create_kwargs = dict()
- if keypair:
- create_kwargs['key_name'] = keypair['name']
- if security_group:
- create_kwargs['security_groups'] = [
- {'name': security_group['name']}]
- create_kwargs.update(self._get_bdm(
- source_id,
- source_type,
- delete_on_termination=delete_on_termination))
- if name:
- create_kwargs['name'] = name
-
- return self.create_server(image_id='', **create_kwargs)
-
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
@@ -104,8 +68,8 @@
# create an instance from volume
LOG.info("Booting instance 1 from volume")
- volume_origin = self._create_volume_from_image()
- instance_1st = self._boot_instance_from_resource(
+ volume_origin = self.create_volume_from_image()
+ instance_1st = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -124,7 +88,7 @@
self._delete_server(instance_1st)
# create a 2nd instance from volume
- instance_2nd = self._boot_instance_from_resource(
+ instance_2nd = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
@@ -149,10 +113,10 @@
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
server_from_snapshot = (
- self._boot_instance_from_resource(source_id=volume['id'],
- source_type='volume',
- keypair=keypair,
- security_group=security_group))
+ self.boot_instance_from_resource(source_id=volume['id'],
+ source_type='volume',
+ keypair=keypair,
+ security_group=security_group))
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
@@ -171,13 +135,13 @@
@utils.services('compute', 'image', 'volume')
def test_create_server_from_volume_snapshot(self):
# Create a volume from an image
- boot_volume = self._create_volume_from_image()
+ boot_volume = self.create_volume_from_image()
# Create a snapshot
boot_snapshot = self.create_volume_snapshot(boot_volume['id'])
# Create a server from a volume snapshot
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=boot_snapshot['id'],
source_type='snapshot',
delete_on_termination=True)
@@ -203,16 +167,23 @@
self.assertEqual(created_volume[0]['id'],
created_volume_info['attachments'][0]['volume_id'])
+ # Delete the server and wait
+ self._delete_server(server)
+
+ # Assert that the underlying volume is gone before class tearDown
+ # to prevent snapshot deletion from failing
+ self.volumes_client.wait_for_resource_deletion(created_volume[0]['id'])
+
@decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_image_defined_boot_from_volume(self):
# create an instance from image-backed volume
- volume_origin = self._create_volume_from_image()
+ volume_origin = self.create_volume_from_image()
name = data_utils.rand_name(self.__class__.__name__ +
'-volume-backed-server')
- instance1 = self._boot_instance_from_resource(
+ instance1 = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
delete_on_termination=True,
@@ -288,7 +259,7 @@
self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
# Boot a server from the encrypted volume
- server = self._boot_instance_from_resource(
+ server = self.boot_instance_from_resource(
source_id=volume['id'],
source_type='volume',
delete_on_termination=False)
diff --git a/tempest/test.py b/tempest/test.py
index 85000b6..f383bc1 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -27,6 +27,7 @@
from tempest.common import credentials_factory as credentials
from tempest.common import utils
from tempest import config
+from tempest.lib import base as lib_base
from tempest.lib.common import fixed_network
from tempest.lib.common import profiler
from tempest.lib.common import validation_resources as vr
@@ -148,11 +149,25 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
# All the configuration checks that may generate a skip
- cls.skip_checks()
- if not cls.__skip_checks_called:
- raise RuntimeError("skip_checks for %s did not call the super's "
- "skip_checks" % cls.__name__)
+        # TODO(gmann): cls.handle_skip_exception is really a workaround for
+        # a testtools bug - https://github.com/testing-cabal/testtools/issues/272
+        # stestr, which Tempest uses internally to run the tests, switches to
+        # the customized test runner (which uses stdlib unittest) for >=py3.5,
+        # else it uses testtools.run -
+        # https://github.com/mtreinish/stestr/pull/265
+        # These two test runners are not compatible due to skip exception
+        # handling (due to unittest2): testtools.run treats unittest.SkipTest
+        # as an error, while stdlib unittest treats unittest2.case.SkipTest
+        # (raised by testtools.TestCase.skipException) as an error.
+        # The workaround below can be removed once testtools fixes issue #272.
+ orig_skip_exception = testtools.TestCase.skipException
+ lib_base._handle_skip_exception()
try:
+ cls.skip_checks()
+
+ if not cls.__skip_checks_called:
+ raise RuntimeError(
+ "skip_checks for %s did not call the super's "
+ "skip_checks" % cls.__name__)
# Allocation of all required credentials and client managers
cls._teardowns.append(('credentials', cls.clear_credentials))
cls.setup_credentials()
@@ -173,6 +188,8 @@
six.reraise(etype, value, trace)
finally:
del trace # to avoid circular refs
+ finally:
+ testtools.TestCase.skipException = orig_skip_exception
@classmethod
def tearDownClass(cls):
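
The structure of the workaround above is save, patch, run, restore. A minimal sketch of that pattern follows, with the caveat that deciding which runner is active (and therefore which SkipTest class to install) is what tempest.lib.base._handle_skip_exception() actually encapsulates:

import unittest

import testtools

orig_skip_exception = testtools.TestCase.skipException
# Assumption for this sketch: the stdlib unittest runner is in use, so the
# stdlib SkipTest must be raised for skips to be honoured.
testtools.TestCase.skipException = unittest.SkipTest
try:
    pass  # skip_checks(), credential allocation, etc. happen here
finally:
    testtools.TestCase.skipException = orig_skip_exception
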
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index b349bba..a962e37 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -28,7 +28,6 @@
self.os_username = 'fake_user'
self.os_password = 'fake_password'
self.os_project_name = 'fake_project_name'
- self.os_tenant_name = None
self.os_domain_name = 'fake_domain'
self.tag = 'fake'
self.concurrency = 2
@@ -100,15 +99,6 @@
self.assertEqual(self.opts.os_password, admin_creds.password)
self.assertFalse(hasattr(admin_creds, 'domain_name'))
- def test_get_credential_provider_with_tenant(self):
- self.opts.os_project_name = None
- self.opts.os_tenant_name = 'fake_tenant'
- cp = account_generator.get_credential_provider(self.opts)
- admin_creds = cp.default_admin_creds
- self.assertEqual(self.opts.os_tenant_name, admin_creds.tenant_name)
- self.assertEqual(self.opts.os_username, admin_creds.username)
- self.assertEqual(self.opts.os_password, admin_creds.password)
-
class TestAccountGeneratorV3(TestAccountGeneratorV2):
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index de0dbec..8366290 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -175,7 +175,8 @@
"ports": {u'aa74aa4v-741a': u'saved-port'},
"security_groups": {u'7q844add-3697': u'saved-sec-group'},
"subnets": {u'55ttda4a-2584': u'saved-subnet'},
- "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'}
+ "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'},
+ "regions": {u'RegionOne': {}}
}
# Mocked methods
get_method = 'tempest.lib.common.rest_client.RestClient.get'
@@ -195,6 +196,7 @@
is_save_state=is_save_state,
is_preserve=is_preserve,
is_dry_run=is_dry_run,
+ project_id='b8e3ece07bb049138d224436756e3b57',
data={},
saved_state_json=self.saved_state
)
@@ -532,6 +534,135 @@
self._test_saved_state_true([(self.get_method, self.response, 200)])
+class TestVolumeQuotaService(BaseCmdServiceTests):
+
+ service_class = 'VolumeQuotaService'
+ service_name = 'volume_quota_service'
+ response = {
+ "quota_set": {
+ "groups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "per_volume_gigabytes":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "volumes":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "backup_gigabytes":
+ {"reserved": 0, "limit": 1000, "in_use": 0},
+ "snapshots":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "volumes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "snapshots_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "backups":
+ {"reserved": 0, "limit": 10, "in_use": 0},
+ "gigabytes_iscsi":
+ {"reserved": 0, "limit": -1, "in_use": 0},
+ "id": "b8e3ece07bb049138d224436756e3b57"
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 200),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNovaQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NovaQuotaService'
+ service_name = 'nova_quota_service'
+ response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 128,
+ "maxPersonality": 5,
+ "totalServerGroupsUsed": 0,
+ "maxImageMeta": 128,
+ "maxPersonalitySize": 10240,
+ "maxTotalKeypairs": 100,
+ "maxSecurityGroupRules": 20,
+ "maxServerGroups": 10,
+ "totalCoresUsed": 0,
+ "totalRAMUsed": 0,
+ "totalInstancesUsed": 0,
+ "maxSecurityGroups": 10,
+ "totalFloatingIpsUsed": 0,
+ "maxTotalCores": 20,
+ "maxServerGroupMembers": 10,
+ "maxTotalFloatingIps": 10,
+ "totalSecurityGroupsUsed": 0,
+ "maxTotalInstances": 10,
+ "maxTotalRAMSize": 51200
+ }
+ }
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
+class TestNetworkQuotaService(BaseCmdServiceTests):
+
+ service_class = 'NetworkQuotaService'
+ service_name = 'network_quota_service'
+ response = {
+ "quotas": [{
+ "subnet": 110,
+ "network": 100,
+ "floatingip": 50,
+ "tenant_id": "81e8490db559474dacb2212fca9cca2d",
+ "subnetpool": -1,
+ "security_group_rule": 100,
+ "trunk": -1,
+ "security_group": 10,
+ "router": 10,
+        "rbac_policy": 10,
+        "project_id": "81e8490db559474dacb2212fca9cca2d",
+        "port": 500
+ }]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+
# Begin network service classes
class TestNetworkService(BaseCmdServiceTests):
@@ -1202,6 +1333,57 @@
# begin global services
+class TestRegionService(BaseCmdServiceTests):
+ service_class = 'RegionService'
+ service_name = 'regions'
+ response = {
+ "regions": [{
+ "parent_region_id": None,
+ "id": "RegionOne",
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions/RegionOne"
+ },
+ "description": ""
+ },
+ {
+ "parent_region_id": None,
+ "id": "RegionTwo",
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions/RegionTwo"
+ },
+ "description": ""
+ }],
+ "links": {
+ "self":
+ "http://10.0.145.61:5000/v3/regions",
+ "next": None,
+ "previous": None
+ }
+ }
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, "exception", None)]
+ self._test_delete(delete_mock)
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, "exception", None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
class TestDomainService(BaseCmdServiceTests):
service_class = 'DomainService'
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 02e1c99..e3bb836 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -15,6 +15,7 @@
import time
import mock
+from oslo_utils.fixture import uuidsentinel as uuids
from tempest.common import waiters
from tempest import exceptions
@@ -54,44 +55,6 @@
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
- @mock.patch.object(time, 'sleep')
- def test_wait_for_volume_status_error_restoring(self, mock_sleep):
- # Tests that the wait method raises VolumeRestoreErrorException if
- # the volume status is 'error_restoring'.
- client = mock.Mock(spec=volumes_client.VolumesClient,
- resource_type="volume",
- build_interval=1)
- volume1 = {'volume': {'status': 'restoring-backup'}}
- volume2 = {'volume': {'status': 'error_restoring'}}
- mock_show = mock.Mock(side_effect=(volume1, volume2))
- client.show_volume = mock_show
- volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
- self.assertRaises(exceptions.VolumeRestoreErrorException,
- waiters.wait_for_volume_resource_status,
- client, volume_id, 'available')
- mock_show.assert_has_calls([mock.call(volume_id),
- mock.call(volume_id)])
- mock_sleep.assert_called_once_with(1)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_for_volume_status_error_extending(self, mock_sleep):
- # Tests that the wait method raises VolumeExtendErrorException if
- # the volume status is 'error_extending'.
- client = mock.Mock(spec=volumes_client.VolumesClient,
- resource_type="volume",
- build_interval=1)
- volume1 = {'volume': {'status': 'extending'}}
- volume2 = {'volume': {'status': 'error_extending'}}
- mock_show = mock.Mock(side_effect=(volume1, volume2))
- client.show_volume = mock_show
- volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
- self.assertRaises(exceptions.VolumeExtendErrorException,
- waiters.wait_for_volume_resource_status,
- client, volume_id, 'available')
- mock_show.assert_has_calls([mock.call(volume_id),
- mock.call(volume_id)])
- mock_sleep.assert_called_once_with(1)
-
class TestInterfaceWaiters(base.TestCase):
@@ -232,3 +195,89 @@
show_volume.assert_has_calls([mock.call(mock.sentinel.volume_id),
mock.call(mock.sentinel.volume_id),
mock.call(mock.sentinel.volume_id)])
+
+ @mock.patch.object(time, 'sleep')
+ def test_wait_for_volume_status_error_restoring(self, mock_sleep):
+ # Tests that the wait method raises VolumeRestoreErrorException if
+ # the volume status is 'error_restoring'.
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1)
+ volume1 = {'volume': {'status': 'restoring-backup'}}
+ volume2 = {'volume': {'status': 'error_restoring'}}
+ mock_show = mock.Mock(side_effect=(volume1, volume2))
+ client.show_volume = mock_show
+ volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+ self.assertRaises(exceptions.VolumeRestoreErrorException,
+ waiters.wait_for_volume_resource_status,
+ client, volume_id, 'available')
+ mock_show.assert_has_calls([mock.call(volume_id),
+ mock.call(volume_id)])
+ mock_sleep.assert_called_once_with(1)
+
+ @mock.patch.object(time, 'sleep')
+ def test_wait_for_volume_status_error_extending(self, mock_sleep):
+ # Tests that the wait method raises VolumeExtendErrorException if
+ # the volume status is 'error_extending'.
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1)
+ volume1 = {'volume': {'status': 'extending'}}
+ volume2 = {'volume': {'status': 'error_extending'}}
+ mock_show = mock.Mock(side_effect=(volume1, volume2))
+ client.show_volume = mock_show
+ volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+ self.assertRaises(exceptions.VolumeExtendErrorException,
+ waiters.wait_for_volume_resource_status,
+ client, volume_id, 'available')
+ mock_show.assert_has_calls([mock.call(volume_id),
+ mock.call(volume_id)])
+ mock_sleep.assert_called_once_with(1)
+
+ def test_wait_for_volume_attachment(self):
+ vol_detached = {'volume': {'attachments': []}}
+ vol_attached = {'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}}
+ show_volume = mock.MagicMock(side_effect=[
+ vol_attached, vol_attached, vol_detached])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=5,
+ show_volume=show_volume)
+ self.patch('time.time')
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is called until the attachment is removed.
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
+
+ def test_wait_for_volume_attachment_timeout(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': [
+ {'attachment_id': uuids.attachment_id}]}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ # Assert that a timeout is raised if the attachment remains.
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove,
+ client, uuids.volume_id, uuids.attachment_id)
+
+ def test_wait_for_volume_attachment_not_present(self):
+ show_volume = mock.MagicMock(return_value={
+ 'volume': {'attachments': []}})
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
+ uuids.attachment_id)
+ # Assert that show volume is only called once before we return
+ show_volume.assert_called_once_with(uuids.volume_id)
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 1f0080f..937f93a 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -88,7 +88,7 @@
# the information using gnu/linux tools.
def _assert_exec_called_with(self, cmd):
- cmd = "set -eu -o pipefail; PATH=$PATH:/sbin; " + cmd
+ cmd = "set -eu -o pipefail; PATH=$PATH:/sbin:/usr/sbin; " + cmd
self.ssh_mock.mock.exec_command.assert_called_with(cmd)
def test_get_disks(self):
@@ -106,6 +106,16 @@
self.assertEqual(self.conn.get_disks(), result)
self._assert_exec_called_with('lsblk -lb --nodeps')
+ def test_list_disks(self):
+ output_lsblk = """\
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sda 8:0 0 128035676160 0 disk
+sdb 8:16 0 1000204886016 0 disk
+sr0 11:0 1 1073741312 0 rom"""
+ disk_list = ['sda', 'sdb']
+ self.ssh_mock.mock.exec_command.return_value = output_lsblk
+ self.assertEqual(self.conn.list_disks(), disk_list)
+
def test_get_boot_time(self):
booted_at = 10000
uptime_sec = 5000.02
diff --git a/tempest/tests/lib/fake_identity.py b/tempest/tests/lib/fake_identity.py
index 8bae34f..9d7b0fd 100644
--- a/tempest/tests/lib/fake_identity.py
+++ b/tempest/tests/lib/fake_identity.py
@@ -192,7 +192,7 @@
def _fake_v3_response(self, uri, method="GET", body=None, headers=None,
- redirections=5, connection_type=None):
+ redirections=5, connection_type=None, log_req_body=None):
fake_headers = {
"x-subject-token": TOKEN
}
@@ -202,7 +202,7 @@
def _fake_v3_response_domain_scope(self, uri, method="GET", body=None,
headers=None, redirections=5,
- connection_type=None):
+ connection_type=None, log_req_body=None):
fake_headers = {
"status": "201",
"x-subject-token": TOKEN
@@ -213,7 +213,7 @@
def _fake_v3_response_no_scope(self, uri, method="GET", body=None,
headers=None, redirections=5,
- connection_type=None):
+ connection_type=None, log_req_body=None):
fake_headers = {
"status": "201",
"x-subject-token": TOKEN
@@ -223,7 +223,7 @@
def _fake_v2_response(self, uri, method="GET", body=None, headers=None,
- redirections=5, connection_type=None):
+ redirections=5, connection_type=None, log_req_body=None):
return (fake_http.fake_http_response({}, status=200),
json.dumps(IDENTITY_V2_RESPONSE))
diff --git a/tempest/tests/lib/services/identity/v2/test_token_client.py b/tempest/tests/lib/services/identity/v2/test_token_client.py
index a592ada..5b4e210 100644
--- a/tempest/tests/lib/services/identity/v2/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v2/test_token_client.py
@@ -86,6 +86,9 @@
with mock.patch.object(token_client_v2, 'raw_request') as mock_raw_r:
mock_raw_r.return_value = response, body
resp, body = token_client_v2.request('GET', 'fake_uri')
+ mock_raw_r.assert_called_once_with('fake_uri', 'GET',
+ headers=mock.ANY, body=None,
+ log_req_body='<omitted>')
self.assertIsInstance(body, dict)
def test_request_with_bytes_body(self):
diff --git a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
index 9bf9b68..2774c44 100644
--- a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
@@ -146,7 +146,7 @@
def test_list_application_credential_with_bytes_body(self):
self._test_list_app_creds(bytes_body=True)
- def test_delete_trust(self):
+ def test_delete_application_credential(self):
self.check_service_client_function(
self.client.delete_application_credential,
'tempest.lib.common.rest_client.RestClient.delete',
diff --git a/tempest/tests/lib/services/identity/v3/test_groups_client.py b/tempest/tests/lib/services/identity/v3/test_groups_client.py
index 38cf3ae..e3c9851 100644
--- a/tempest/tests/lib/services/identity/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_groups_client.py
@@ -211,3 +211,13 @@
group_id='6e13e2068cf9466e98950595baf6bb35',
user_id='642688fa65a84217b86cef3c063de2b9',
)
+
+ def test_delete_group_user(self):
+ self.check_service_client_function(
+ self.client.delete_group_user,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=204,
+ group_id='6e13e2068cf9466e98950595baf6bb35',
+ user_id='642688fa65a84217b86cef3c063de2b9',
+ )
diff --git a/tempest/tests/lib/services/identity/v3/test_projects_client.py b/tempest/tests/lib/services/identity/v3/test_projects_client.py
index 6ffbcde..d26de06 100644
--- a/tempest/tests/lib/services/identity/v3/test_projects_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_projects_client.py
@@ -62,7 +62,8 @@
"/0c4e939acacf4376bdcd1129f1a054ad"
},
"name": "admin",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -75,7 +76,8 @@
"/0cbd49cbf76d405d9c86562e1d579bd3"
},
"name": "demo",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -88,7 +90,8 @@
"/2db68fed84324f29bb73130c6c2094fb"
},
"name": "swifttenanttest2",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
},
{
"is_domain": False,
@@ -101,7 +104,8 @@
"/3d594eb0f04741069dbbb521635b21c7"
},
"name": "service",
- "parent_id": None
+ "parent_id": None,
+ "tags": []
}
]
}
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index a9c58df..656e10a 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -136,6 +136,9 @@
mock_raw_r.return_value = (
fake_identity._fake_v3_response(None, None))
resp, body = token_client_v3.request('GET', 'fake_uri')
+ mock_raw_r.assert_called_once_with('fake_uri', 'GET',
+ headers=mock.ANY, body=None,
+ log_req_body='<omitted>')
self.assertIsInstance(body, dict)
diff --git a/tempest/tests/lib/services/identity/v3/test_users_client.py b/tempest/tests/lib/services/identity/v3/test_users_client.py
index 5b572f5..c0dfdae 100644
--- a/tempest/tests/lib/services/identity/v3/test_users_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_users_client.py
@@ -25,6 +25,11 @@
'enabled': True,
'name': 'Tempest User',
'password': 'TempestPassword',
+ "description": "Tempest User",
+ "email": "TempestUser@example.com",
+ "options": {
+ "ignore_password_expiry": True
+ }
}
}
@@ -104,6 +109,38 @@
]
}
+ FAKE_PROJECT_LIST = {
+ "links": {
+ "self": "http://example.com/identity/v3/users/313233/projects",
+ "previous": None,
+ "next": None
+ },
+ "projects": [
+ {
+ "description": "description of this project",
+ "domain_id": "161718",
+ "enabled": True,
+ "id": "456788",
+ "links": {
+ "self": "http://example.com/identity/v3/projects/456788"
+ },
+ "name": "a project name",
+ "parent_id": "212223"
+ },
+ {
+ "description": "description of this project",
+ "domain_id": "161718",
+ "enabled": True,
+ "id": "456789",
+ "links": {
+ "self": "http://example.com/identity/v3/projects/456789"
+ },
+ "name": "another domain",
+ "parent_id": "212223"
+ },
+ ]
+ }
+
def setUp(self):
super(TestUsersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -155,6 +192,15 @@
user_id='817fb3c23fd7465ba6d7fe1b1320121d',
)
+ def _test_list_user_projects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_user_projects,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_PROJECT_LIST,
+ bytes_body,
+ user_id='817fb3c23fd7465ba6d7fe1b1320121d',
+ )
+
def test_create_user_with_string_body(self):
self._test_create_user()
@@ -185,6 +231,12 @@
def test_list_user_groups_with_bytes_body(self):
self._test_list_user_groups(bytes_body=True)
+ def test_list_user_projects_with_string_body(self):
+ self._test_list_user_projects()
+
+ def test_list_user_projects_with_bytes_body(self):
+ self._test_list_user_projects(bytes_body=True)
+
def test_delete_user(self):
self.check_service_client_function(
self.client.delete_user,
diff --git a/tempest/tests/lib/services/image/v2/test_image_members_client.py b/tempest/tests/lib/services/image/v2/test_image_members_client.py
index 703b6e1..2caa567 100644
--- a/tempest/tests/lib/services/image/v2/test_image_members_client.py
+++ b/tempest/tests/lib/services/image/v2/test_image_members_client.py
@@ -27,6 +27,28 @@
"schema": "/v2/schemas/member"
}
+ FAKE_LIST_IMAGE_MEMBERS = {
+ "members": [
+ {
+ "created_at": "2013-10-07T17:58:03Z",
+ "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe",
+ "member_id": "123456789",
+ "schema": "/v2/schemas/member",
+ "status": "pending",
+ "updated_at": "2013-10-07T17:58:03Z"
+ },
+ {
+ "created_at": "2013-10-07T17:58:55Z",
+ "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe",
+ "member_id": "987654321",
+ "schema": "/v2/schemas/member",
+ "status": "accepted",
+ "updated_at": "2013-10-08T12:08:55Z"
+ }
+ ],
+ "schema": "/v2/schemas/members"
+ }
+
def setUp(self):
super(TestImageMembersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -34,6 +56,14 @@
'image',
'regionOne')
+ def _test_list_image_members(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_image_members,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_IMAGE_MEMBERS,
+ bytes_body,
+ image_id="dbc999e3-c52f-4200-bedd-3b18fe7f87fe")
+
def _test_show_image_member(self, bytes_body=False):
self.check_service_client_function(
self.client.show_image_member,
@@ -62,6 +92,12 @@
member_id="8989447062e04a818baf9e073fd04fa7",
schema="/v2/schemas/member2")
+ def test_list_image_members_with_str_body(self):
+ self._test_list_image_members()
+
+ def test_list_image_members_with_bytes_body(self):
+ self._test_list_image_members(bytes_body=True)
+
def test_show_image_member_with_str_body(self):
self._test_show_image_member()
diff --git a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
index 2faa5be..894e178 100644
--- a/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespace_tags_client.py
@@ -118,6 +118,14 @@
def test_show_namespace_tag_with_bytes_body(self):
self._test_show_namespace_tag_definition(bytes_body=True)
+ def test_delete_namespace_tag_definition(self):
+ self.check_service_client_function(
+ self.client.delete_namespace_tag,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=204,
+ namespace="OS::Compute::Hypervisor",
+ tag_name="added-sample-tag")
+
def test_delete_all_namespace_tags(self):
self.check_service_client_function(
self.client.delete_namespace_tags,
diff --git a/tempest/tests/lib/services/image/v2/test_namespaces_client.py b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
index 4cb9d01..3b057ad 100644
--- a/tempest/tests/lib/services/image/v2/test_namespaces_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
@@ -26,6 +26,51 @@
"protected": True
}
+ FAKE_LIST_NAMESPACES = {
+ "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc",
+ "namespaces": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "description": "OS::Compute::Libvirt",
+ "display_name": "libvirt Driver Options",
+ "namespace": "OS::Compute::Libvirt",
+ "owner": "admin",
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "name": "OS::Glance::Image",
+ "updated_at": "2014-08-28T17:13:06Z"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt",
+ "updated_at": "2014-08-28T17:13:06Z",
+ "visibility": "public"
+ },
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "description": "OS::Compute::Quota",
+ "display_name": "Flavor Quota",
+ "namespace": "OS::Compute::Quota",
+ "owner": "admin",
+ "protected": True,
+ "resource_type_associations": [
+ {
+ "created_at": "2014-08-28T17:13:06Z",
+ "name": "OS::Nova::Flavor",
+ "updated_at": "2014-08-28T17:13:06Z"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespace",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Quota",
+ "updated_at": "2014-08-28T17:13:06Z",
+ "visibility": "public"
+ }
+ ],
+ "schema": "/v2/schemas/metadefs/namespaces"
+ }
+
FAKE_UPDATE_NAMESPACE = {
"namespace": "OS::Compute::Hypervisor",
"visibility": "public",
@@ -48,6 +93,13 @@
bytes_body,
namespace="OS::Compute::Hypervisor")
+ def _test_list_namespaces(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_namespaces,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_NAMESPACES,
+ bytes_body)
+
def _test_create_namespace(self, bytes_body=False):
self.check_service_client_function(
self.client.create_namespace,
@@ -74,6 +126,12 @@
def test_show_namespace_with_bytes_body(self):
self._test_show_namespace(bytes_body=True)
+ def test_list_namespaces_with_str_body(self):
+ self._test_list_namespaces()
+
+ def test_list_namespaces_with_bytes_body(self):
+ self._test_list_namespaces(bytes_body=True)
+
def test_create_namespace_with_str_body(self):
self._test_create_namespace()
diff --git a/tempest/tests/lib/services/network/test_agents_client.py b/tempest/tests/lib/services/network/test_agents_client.py
index aabc6ce..8904882 100644
--- a/tempest/tests/lib/services/network/test_agents_client.py
+++ b/tempest/tests/lib/services/network/test_agents_client.py
@@ -22,12 +22,145 @@
FAKE_AGENT_ID = "d32019d3-bc6e-4319-9c1d-6123f4135a88"
+ FAKE_LIST_DATA = {
+ "agents": [
+ {
+ "binary": "neutron-dhcp-agent",
+ "description": None,
+ "availability_zone": "nova",
+ "heartbeat_timestamp": "2017-09-12 19:39:56",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "840d5d68-5759-4e9e-812f",
+ "topic": "dhcp_agent",
+ "host": "agenthost1",
+ "agent_type": "DHCP agent",
+ "started_at": "2017-09-12 19:35:36",
+ "created_at": "2017-09-12 19:35:36",
+ "resources_synced": None,
+ "configurations": {
+ "subnets": 2,
+ "dhcp_lease_duration": 86400,
+ "dhcp_driver": "neutron.agent",
+ "networks": 1,
+ "log_agent_heartbeats": False,
+ "ports": 3
+ }
+ }
+ ]
+ }
+
+ FAKE_SHOW_DATA = {
+ "agent": {
+ "binary": "neutron-openvswitch-agent",
+ "description": None,
+ "availability_zone": None,
+ "heartbeat_timestamp": "2017-09-12 19:40:38",
+ "admin_state_up": True,
+ "alive": True,
+ "id": "04c62b91-b799-48b7-9cd5-2982db6df9c6",
+ "topic": "N/A",
+ "host": "agenthost1",
+ "agent_type": "Open vSwitch agent",
+ "started_at": "2017-09-12 19:35:38",
+ "created_at": "2017-09-12 19:35:38",
+ "resources_synced": True,
+ "configurations": {
+ "ovs_hybrid_plug": True,
+ "in_distributed_mode": False,
+ "datapath_type": "system",
+ "vhostuser_socket_dir": "/var/run/openvswitch",
+ "tunneling_ip": "172.16.78.191",
+ "arp_responder_enabled": False,
+ "devices": 0,
+ "ovs_capabilities": {
+ "datapath_types": [
+ "netdev",
+ "system"
+ ],
+ "iface_types": [
+ "geneve",
+ "gre",
+ "internal",
+ "ipsec_gre",
+ "lisp",
+ "patch",
+ "stt",
+ "system",
+ "tap",
+ "vxlan"
+ ]
+ },
+ "log_agent_heartbeats": False,
+ "l2_population": False,
+ "tunnel_types": [
+ "vxlan"
+ ],
+ "extensions": [],
+ "enable_distributed_routing": False,
+ "bridge_mappings": {
+ "public": "br-ex"
+ }
+ }
+ }
+ }
+
+ FAKE_UPDATE_DATA = {
+ "agent": {
+ "description": "My OVS agent for OpenStack"
+ }
+ }
+
def setUp(self):
super(TestAgentsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.agents_client = agents_client.AgentsClient(
fake_auth, "network", "regionOne")
+ def _test_show_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.show_agent,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_SHOW_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_update_agent(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.update_agent,
+ "tempest.lib.common.rest_client.RestClient.put",
+ self.FAKE_UPDATE_DATA,
+ bytes_body,
+ status=200,
+ agent_id=self.FAKE_AGENT_ID)
+
+ def _test_list_agents(self, bytes_body=False):
+ self.check_service_client_function(
+ self.agents_client.list_agents,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_LIST_DATA,
+ bytes_body,
+ status=200)
+
+ def test_show_agent_with_str_body(self):
+ self._test_show_agent()
+
+ def test_show_agent_with_bytes_body(self):
+ self._test_show_agent(bytes_body=True)
+
+ def test_update_agent_with_str_body(self):
+ self._test_update_agent()
+
+ def test_update_agent_with_bytes_body(self):
+ self._test_update_agent(bytes_body=True)
+
+ def test_list_agent_with_str_body(self):
+ self._test_list_agents()
+
+ def test_list_agent_with_bytes_body(self):
+ self._test_list_agents(bytes_body=True)
+
def test_delete_agent(self):
self.check_service_client_function(
self.agents_client.delete_agent,
diff --git a/tempest/tests/lib/services/network/test_routers_client.py b/tempest/tests/lib/services/network/test_routers_client.py
index 2fa5993..f5dcc7d 100644
--- a/tempest/tests/lib/services/network/test_routers_client.py
+++ b/tempest/tests/lib/services/network/test_routers_client.py
@@ -20,37 +20,78 @@
class TestRoutersClient(base.BaseServiceTest):
FAKE_CREATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": True,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "routes": [],
+ "revision_number": 1,
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
FAKE_UPDATE_ROUTER = {
"router": {
- "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
"external_gateway_info": {
- "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b",
"enable_snat": True,
"external_fixed_ips": [
{
- "subnet_id": "255.255.255.0",
- "ip": "192.168.10.1"
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
}
- ]
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
},
- "admin_state_up": False,
- "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "revision_number": 3,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": None,
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
}
}
diff --git a/tempest/tests/lib/services/object_storage/test_capabilities_client.py b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
index b7f972a..9df7c7c 100644
--- a/tempest/tests/lib/services/object_storage/test_capabilities_client.py
+++ b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
@@ -43,7 +43,7 @@
}
self.check_service_client_function(
self.client.list_capabilities,
- 'tempest.lib.common.rest_client.RestClient.get',
+ 'tempest.lib.common.rest_client.RestClient.raw_request',
resp,
bytes_body)
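
The capabilities test now patches RestClient.raw_request instead of RestClient.get, i.e. the mock is applied one layer lower in the transport. A rough self-contained sketch of that mock-by-attribute technique (hypothetical FakeClient, not tempest's CapabilitiesClient):

    # Rough sketch with a hypothetical client: patch the low-level transport
    # call so the public method under test never touches the network.
    from unittest import mock

    class FakeClient:
        def list_capabilities(self):
            return self.raw_request('/info')

        def raw_request(self, url):
            raise RuntimeError('no network access in unit tests')

    fake_body = {"swift": {"version": "2.23.0"}}
    with mock.patch.object(FakeClient, 'raw_request', return_value=fake_body):
        assert FakeClient().list_capabilities() == fake_body
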
diff --git a/tempest/tests/lib/services/volume/v3/test_backups_client.py b/tempest/tests/lib/services/volume/v3/test_backups_client.py
index 5412064..97e1132 100644
--- a/tempest/tests/lib/services/volume/v3/test_backups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_backups_client.py
@@ -60,8 +60,11 @@
],
"name": "backup001",
"object_count": 22,
+ "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6",
+ "user_id": "515ba0dd59f84f25a6a084a45d8d93b2",
"size": 1,
"status": "available",
+ "updated_at": "2013-04-02T10:35:27.000000",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": True,
"has_dependent_backups": False
@@ -73,7 +76,16 @@
"backup": {
"id": "4c65c15f-a5c5-464b-b92a-90e4c04636a7",
"name": "fake-backup-name",
- "links": "fake-links"
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ]
}
}
diff --git a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
index c2784b2..889fd42 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_snapshots_client.py
@@ -20,9 +20,9 @@
class TestGroupSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_GROUP_SNAPSHOT = {
"group_snapshot": {
- "group_id": "49c8c114-0d68-4e89-b8bc-3f5a674d54be",
- "name": "group-snapshot-001",
- "description": "Test group snapshot 1"
+ "id": "6f519a48-3183-46cf-a32f-41815f816666",
+ "name": "first_group_snapshot",
+ "group_type_id": "58737af7-786b-48b7-ab7c-2447e74b0ef4"
}
}
@@ -34,7 +34,7 @@
"description": "Test group snapshot 1",
"group_type_id": "0e58433f-d108-4bf3-a22c-34e6b71ef86b",
"status": "available",
- "created_at": "20127-06-20T03:50:07Z"
+ "created_at": "2017-06-20T03:50:07Z"
}
}
@@ -102,8 +102,7 @@
resp_body = {
'group_snapshots': [{
'id': group_snapshot['id'],
- 'name': group_snapshot['name'],
- 'group_type_id': group_snapshot['group_type_id']}
+ 'name': group_snapshot['name']}
for group_snapshot in
self.FAKE_LIST_GROUP_SNAPSHOTS['group_snapshots']
]
diff --git a/tempest/tests/lib/services/volume/v3/test_groups_client.py b/tempest/tests/lib/services/volume/v3/test_groups_client.py
index 5a5ae88..8a2c4ea 100644
--- a/tempest/tests/lib/services/volume/v3/test_groups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_groups_client.py
@@ -59,11 +59,11 @@
"volume_types": ["2103099d-7cc3-4e52-a2f1-23a5284416f3"],
"status": "available",
"availability_zone": "az1",
- "created_at": "20127-06-20T03:50:07Z"
+ "created_at": "2017-06-20T03:50:07Z"
}
}
- FAKE_LIST_GROUPS = {
+ FAKE_LIST_GROUP_DETAILS = {
"groups": [
{
"id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
@@ -100,6 +100,19 @@
]
}
+ FAKE_LIST_GROUPS = {
+ "groups": [
+ {
+ "id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
+ "name": "group-001",
+ },
+ {
+ "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
+ "name": "group-002",
+ }
+ ]
+ }
+
def setUp(self):
super(TestGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -123,13 +136,21 @@
bytes_body,
group_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ def _test_list_group_details(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_groups,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_GROUP_DETAILS,
+ bytes_body,
+ detail=True)
+
def _test_list_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_groups,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_GROUPS,
bytes_body,
- detail=True)
+ detail=False)
def test_create_group_with_str_body(self):
self._test_create_group()
@@ -143,6 +164,12 @@
def test_show_group_with_bytes_body(self):
self._test_show_group(bytes_body=True)
+ def test_list_group_details_with_str_body(self):
+ self._test_list_group_details()
+
+ def test_list_group_details_with_bytes_body(self):
+ self._test_list_group_details(bytes_body=True)
+
def test_list_groups_with_str_body(self):
self._test_list_groups()
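
The split between FAKE_LIST_GROUP_DETAILS and FAKE_LIST_GROUPS mirrors the two list modes of the groups API: detail=True is expected to return full records, detail=False only id and name. A small illustration with a hypothetical list_groups function, not the real tempest client:

    # Hypothetical illustration only: detail=False trims records down to the
    # id/name pairs that FAKE_LIST_GROUPS models; detail=True keeps everything.
    def list_groups(detail=False):
        full = [{"id": "0e701ab8-1bec-4b9f-b026-a7ba4af13578",
                 "name": "group-001",
                 "status": "available"}]
        if detail:
            return {"groups": full}
        return {"groups": [{"id": g["id"], "name": g["name"]} for g in full]}

    assert "status" in list_groups(detail=True)["groups"][0]
    assert "status" not in list_groups(detail=False)["groups"][0]
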
diff --git a/tempest/tests/lib/services/volume/v3/test_hosts_client.py b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
index 09bc0b1..8033e38 100644
--- a/tempest/tests/lib/services/volume/v3/test_hosts_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
@@ -48,7 +48,7 @@
"total_volume_gb": "2",
"total_snapshot_gb": "0",
"project": "(total)",
- "host": "fake-host",
+ "host": "fake-host@rbd",
"snapshot_count": "0"
}
},
@@ -58,7 +58,7 @@
"total_volume_gb": "2",
"total_snapshot_gb": "0",
"project": "f21a9c86d7114bf99c711f4874d80474",
- "host": "fake-host",
+ "host": "fake-host@lvm",
"snapshot_count": "0"
}
}
diff --git a/tempest/tests/lib/services/volume/v3/test_quotas_client.py b/tempest/tests/lib/services/volume/v3/test_quotas_client.py
index aa5d251..f09784c 100644
--- a/tempest/tests/lib/services/volume/v3/test_quotas_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_quotas_client.py
@@ -20,15 +20,26 @@
class TestQuotasClient(base.BaseServiceTest):
FAKE_QUOTAS = {
"quota_set": {
+ "id": '730a1cbd-68ca-4d68-8e09-d603f2dfa72b',
"gigabytes": 5,
"snapshots": 10,
- "volumes": 20
+ "volumes": 20,
+ 'backups': 10,
+ 'groups': 10,
+ 'per_volume_gigabytes': 1000,
+ 'backup_gigabytes': 2000
}
}
- FAKE_UPDATE_QUOTAS_REQUEST = {
+ FAKE_UPDATE_QUOTAS_RESPONSE = {
"quota_set": {
- "security_groups": 45
+ "gigabytes": 6,
+ "snapshots": 11,
+ "volumes": 21,
+ 'backups': 11,
+ 'groups': 11,
+ 'per_volume_gigabytes': 1001,
+ 'backup_gigabytes': 2001
}
}
@@ -57,7 +68,7 @@
self.check_service_client_function(
self.client.update_quota_set,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_QUOTAS_REQUEST,
+ self.FAKE_UPDATE_QUOTAS_RESPONSE,
bytes_body, tenant_id="fake_tenant")
def test_show_default_quota_set_with_str_body(self):
diff --git a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
index 2efd2e6..1ea4c65 100644
--- a/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_snapshots_client.py
@@ -20,61 +20,85 @@
class TestSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_SNAPSHOT = {
"snapshot": {
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "force": True
+ "created_at": "2019-03-11T16:24:34.469003",
+ "description": "Daily backup",
+ "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e"
}
}
- FAKE_UPDATE_SNAPSHOT_REQUEST = {
- "metadata": {
- "key": "v1"
+ FAKE_UPDATE_SNAPSHOT_RESPONSE = {
+ "snapshot": {
+ "created_at": "2019-03-12T04:53:53.426591",
+ "description": "This is yet, another snapshot.",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-002",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "070c942d-9909-42e9-a467-7a781f150c58"
}
}
FAKE_INFO_SNAPSHOT = {
"snapshot": {
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z"
+ "created_at": "2019-03-12T04:42:00.809352",
+ "description": "Daily backup",
+ "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ "metadata": {
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37"
}
}
FAKE_LIST_SNAPSHOTS = {
"snapshots": [
{
- "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- "display_name": "snap-001",
- "display_description": "Daily backup",
- "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
- "status": "available",
- "size": 30,
- "created_at": "2012-02-29T03:50:07Z",
+ "created_at": "2019-03-11T16:24:36.464445",
+ "description": "Daily backup",
+ "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
- "contents": "junk"
- }
- },
- {
- "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
- "display_name": "snap-002",
- "display_description": "Weekly backup",
- "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
- "status": "available",
- "size": 25,
- "created_at": "2012-03-19T01:52:47Z",
- "metadata": {}
+ "key": "v3"
+ },
+ "name": "snap-001",
+ "os-extended-snapshot-attributes:progress": "0%",
+ "os-extended-snapshot-attributes:project_id":
+ "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
+ "size": 10,
+ "status": "creating",
+ "updated_at": None,
+ "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6"
}
]
}
FAKE_SNAPSHOT_METADATA_ITEM = {
+ "metadata": {
+ "key": "value"
+ }
+ }
+
+ FAKE_SNAPSHOT_KEY = {
"meta": {
- "key1": "value1"
+ "key": "new_value"
}
}
@@ -99,7 +123,7 @@
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_INFO_SNAPSHOT,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_list_snapshots(self, bytes_body=False):
self.check_service_client_function(
@@ -113,48 +137,48 @@
self.check_service_client_function(
self.client.create_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.post',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
- metadata={"key": "v1"})
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
+ metadata={"key": "value"})
def _test_update_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_UPDATE_SNAPSHOT_RESPONSE,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_show_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_UPDATE_SNAPSHOT_REQUEST,
- bytes_body, snapshot_id="cbc36478b0bd8e67e89")
+ self.FAKE_SNAPSHOT_METADATA_ITEM,
+ bytes_body, snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c")
def _test_update_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.put',
- self.FAKE_INFO_SNAPSHOT,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
def _test_show_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_SNAPSHOT_METADATA_ITEM,
+ self.FAKE_SNAPSHOT_KEY,
bytes_body,
- snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ snapshot_id="4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
id="key1")
def test_create_snapshot_with_str_body(self):
diff --git a/tempest/tests/lib/services/volume/v3/test_versions_client.py b/tempest/tests/lib/services/volume/v3/test_versions_client.py
index b9abd45..575cae3 100644
--- a/tempest/tests/lib/services/volume/v3/test_versions_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_versions_client.py
@@ -97,6 +97,14 @@
'volume',
'regionOne')
+ def _test_get_base_version_url(self, url, expected_base_url):
+ fake_auth = fake_auth_provider.FakeAuthProvider(fake_base_url=url)
+ client = versions_client.VersionsClient(fake_auth,
+ 'volume',
+ 'regionOne')
+ self.assertEqual(expected_base_url,
+ client._get_base_version_url())
+
def _test_list_versions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_versions,
@@ -105,22 +113,30 @@
bytes_body,
300)
+ def _test_show_version(self, version, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_version,
+ 'tempest.lib.common.rest_client.RestClient.raw_request',
+ self.FAKE_VERSION_DETAILS,
+ bytes_body,
+ 200, version=version)
+
def test_list_versions_with_str_body(self):
self._test_list_versions()
def test_list_versions_with_bytes_body(self):
self._test_list_versions(bytes_body=True)
- def _test_show_version(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_version,
- 'tempest.lib.common.rest_client.RestClient.get',
- self.FAKE_VERSION_DETAILS,
- bytes_body,
- 200, version='v3')
-
def test_show_version_details_with_str_body(self):
- self._test_show_version()
+ self._test_show_version('v3')
def test_show_version_details_with_bytes_body(self):
- self._test_show_version(bytes_body=True)
+ self._test_show_version('v3', bytes_body=True)
+
+ def test_get_base_version_url_app_name(self):
+ self._test_get_base_version_url('https://bar.org/volume/v1/123',
+ 'https://bar.org/volume/')
+ self._test_get_base_version_url('https://bar.org/volume/v2/123',
+ 'https://bar.org/volume/')
+ self._test_get_base_version_url('https://bar.org/volume/v3/123',
+ 'https://bar.org/volume/')
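
The new _test_get_base_version_url cases expect any /vN/... suffix to be stripped from the volume endpoint. One way to express that reduction (an assumption for illustration, not tempest's actual _get_base_version_url implementation):

    # Assumption/illustration only, not the tempest implementation: drop the
    # version path segment and everything after it from a service endpoint.
    import re

    def get_base_version_url(endpoint):
        return re.sub(r'v\d+(\.\d+)?.*$', '', endpoint)

    assert get_base_version_url('https://bar.org/volume/v1/123') == 'https://bar.org/volume/'
    assert get_base_version_url('https://bar.org/volume/v3/123') == 'https://bar.org/volume/'
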
diff --git a/tempest/tests/lib/services/volume/v3/test_volumes_client.py b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
index 1250536..56c1a35 100644
--- a/tempest/tests/lib/services/volume/v3/test_volumes_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
@@ -24,27 +24,25 @@
FAKE_VOLUME_SUMMARY = {
"volume-summary": {
- "total_size": 20,
- "total_count": 5
+ "total_size": 4,
+ "total_count": 4,
+ "metadata": {
+ "key1": ["value1", "value2"],
+ "key2": ["value2"]
+ }
}
}
FAKE_VOLUME_METADATA_ITEM = {
"meta": {
- "key1": "value1"
+ "name": "metadata1"
}
}
FAKE_VOLUME_IMAGE_METADATA = {
"metadata": {
- "container_format": "bare",
- "min_ram": "0",
- "disk_format": "raw",
- "image_name": "xly-ubuntu16-server",
- "image_id": "3e087b0c-10c5-4255-b147-6e8e9dbad6fc",
- "checksum": "008f5d22fe3cb825d714da79607a90f9",
- "min_disk": "0",
- "size": "8589934592"
+ "key1": "value1",
+ "key2": "value2"
}
}
diff --git a/tempest/tests/lib/test_base.py b/tempest/tests/lib/test_base.py
index 27cda1a..2c16e1c 100644
--- a/tempest/tests/lib/test_base.py
+++ b/tempest/tests/lib/test_base.py
@@ -48,6 +48,7 @@
@classmethod
def setUpClass(cls): # noqa
"""Simulate absence of super() call."""
+ cls.orig_skip_exception = cls.skipException
def setUp(self):
try:
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 37fe646..c849231 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -170,7 +170,8 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_timeout_in_exec_command(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([0, 0, 0], True)
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([0, 0, 0], True))
# Test for a timeout condition immediately raised
client = ssh.Client('localhost', 'root', timeout=2)
@@ -187,7 +188,7 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_command(self):
- chan_mock, poll_mock, select_mock = (
+ chan_mock, poll_mock, select_mock, client_mock = (
self._set_mocks_for_select([[1, 0, 0]], True))
chan_mock.recv_exit_status.return_value = 0
@@ -211,6 +212,8 @@
chan_mock.recv_stderr.assert_called_once_with(1024)
chan_mock.recv_exit_status.assert_called_once_with()
+ client_mock.close.assert_called_once_with()
+
def _set_mocks_for_select(self, poll_data, ito_value=False):
gsc_mock = self.patch('tempest.lib.common.ssh.Client.'
'_get_ssh_connection')
@@ -235,14 +238,15 @@
else:
poll_mock.poll.return_value = poll_data
- return chan_mock, poll_mock, select_mock
+ return chan_mock, poll_mock, select_mock, client_mock
_utf8_string = six.unichr(1071)
_utf8_bytes = _utf8_string.encode("utf-8")
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_good_command_output(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([1, 0, 0])
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([1, 0, 0]))
closed_prop = mock.PropertyMock(return_value=True)
type(chan_mock).closed = closed_prop
@@ -257,7 +261,8 @@
@mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
def test_exec_bad_command_output(self):
- chan_mock, poll_mock, _ = self._set_mocks_for_select([1, 0, 0])
+ chan_mock, poll_mock, _, _ = (
+ self._set_mocks_for_select([1, 0, 0]))
closed_prop = mock.PropertyMock(return_value=True)
type(chan_mock).closed = closed_prop
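
The extra client_mock return value in _set_mocks_for_select exists so the tests can assert that the SSH connection is closed exactly once after exec_command finishes. A bare-bones version of that assert_called_once_with pattern on a generic mock (not tempest's ssh.Client):

    # Generic illustration of the new assertion, using a plain Mock rather
    # than tempest's ssh.Client: the code under test must close its client
    # exactly once, even on the success path.
    from unittest import mock

    def run_and_cleanup(client):
        try:
            return 'fake output'
        finally:
            client.close()

    client_mock = mock.Mock()
    assert run_and_cleanup(client_mock) == 'fake output'
    client_mock.close.assert_called_once_with()
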
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index fc50736..49fd010 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -532,13 +532,14 @@
# If a skip condition is hit in the test, no credentials or resource
# is provisioned / cleaned-up
self.mocks['skip_checks'].side_effect = (
- testtools.testcase.TestSkipped())
+ testtools.TestCase.skipException())
suite = unittest.TestSuite((self.test,))
log = []
result = LoggingTestResult(log)
suite.run(result)
# If we trigger a skip condition, teardown is not invoked at all
- self.assertEqual(self.SETUP_FIXTURES[:2],
+ self.assertEqual((self.SETUP_FIXTURES[:2] +
+ [self.TEARDOWN_FIXTURES[0]]),
self.test.fixtures_invoked)
def test_skip_credentials_fails(self):
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index d4ffd01..64adcbe 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -35,9 +35,8 @@
# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
# when the patches are merged.
BLACKLIST = [
- 'openstack/barbican-tempest-plugin',
- # https://review.opendev.org/#/c/634631/
'x/gce-api', # It looks gce-api doesn't support python3 yet.
+ 'x/group-based-policy', # It looks like this doesn't support python3 yet.
'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
'openstack/networking-generic-switch',
# https://review.opendev.org/#/c/634846/
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-blacklist.txt
index 9566f69..263b2e4 100644
--- a/tools/tempest-integrated-gate-networking-blacklist.txt
+++ b/tools/tempest-integrated-gate-networking-blacklist.txt
@@ -3,7 +3,9 @@
# Skip Cinder, Glance, keystone and Swift API tests.
tempest.api.volume
+tempest.api.compute.volumes
tempest.api.image
+tempest.api.compute.images
tempest.api.object_storage
tempest.api.identity
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index a087a4c..b484a41 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -60,6 +60,9 @@
fi
}
+: ${UPPER_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
+DEPS="-c${UPPER_CONSTRAINTS_FILE}"
+
# function to create virtualenv to perform sanity operation
function prepare_workspace {
SANITY_DIR=$(pwd)
@@ -73,10 +76,10 @@
# Function to install project
function install_project {
- "$TVENV" pip install "$SANITY_DIR"/"$1"
+ "$TVENV" pip install $DEPS "$SANITY_DIR"/"$1"
# Check for test-requirements.txt file in a project then install it.
if [ -e "$SANITY_DIR"/"$1"/test-requirements.txt ]; then
- "$TVENV" pip install -r "$SANITY_DIR"/"$1"/test-requirements.txt
+ "$TVENV" pip install $DEPS -r "$SANITY_DIR"/"$1"/test-requirements.txt
fi
}
diff --git a/tox.ini b/tox.ini
index ca4bb3f..64921ef 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,9 +1,11 @@
[tox]
-envlist = pep8,py36,py37,py27,bashate,pip-check-reqs
-minversion = 2.3.1
+envlist = pep8,py36,py37,bashate,pip-check-reqs
+minversion = 3.1.1
skipsdist = True
+ignore_basepython_conflict = True
[tempestenv]
+basepython = python3
sitepackages = False
setenv =
VIRTUAL_ENV={envdir}
@@ -13,6 +15,7 @@
-r{toxinidir}/requirements.txt
[testenv]
+basepython = python3
setenv =
VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE=1
@@ -49,12 +52,12 @@
coverage report
[testenv:debug]
-basepython = python3
commands = oslo_debug_helper -t tempest/tests {posargs}
[testenv:all]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
@@ -77,6 +80,7 @@
setenv =
{[tempestenv]setenv}
OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
+basepython = {[tempestenv]basepython}
deps = {[tempestenv]deps}
commands =
echo "WARNING: The all-plugin env is deprecated and will be removed"
@@ -90,6 +94,7 @@
setenv =
{[tempestenv]setenv}
OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
+basepython = {[tempestenv]basepython}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
@@ -98,6 +103,7 @@
[testenv:full]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
@@ -111,6 +117,7 @@
[testenv:full-parallel]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select all tempest scenario and including the non slow api tests
@@ -121,6 +128,7 @@
[testenv:integrated-network]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
@@ -133,6 +141,7 @@
[testenv:integrated-compute]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
@@ -145,6 +154,7 @@
[testenv:integrated-placement]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
@@ -157,6 +167,7 @@
[testenv:integrated-storage]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
@@ -169,6 +180,7 @@
[testenv:integrated-object-storage]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
@@ -181,6 +193,7 @@
[testenv:full-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
@@ -193,6 +206,7 @@
[testenv:scenario]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select all scenario tests
@@ -203,6 +217,7 @@
[testenv:smoke]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands =
@@ -212,6 +227,7 @@
[testenv:smoke-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# This is still serial because neutron doesn't work with parallel. See:
@@ -224,6 +240,7 @@
[testenv:slow-serial]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select the slow tagged tests to run serially:
@@ -234,6 +251,7 @@
[testenv:ipv6-only]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# Run only smoke and ipv6 tests. This env is used to tests
@@ -253,12 +271,12 @@
[testenv:venv-tempest]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands = {posargs}
[testenv:docs]
-basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
@@ -268,11 +286,18 @@
sphinx-build -W -b html doc/source doc/build/html
whitelist_externals = rm
+[testenv:pdf-docs]
+deps = {[testenv:docs]deps}
+whitelist_externals =
+ make
+commands =
+ sphinx-build -W -b latex doc/source doc/build/pdf
+ make -C doc/build/pdf
+
[testenv:pep8]
deps =
-r{toxinidir}/test-requirements.txt
autopep8
-basepython = python3
commands =
autopep8 --exit-code --max-line-length=79 --experimental --diff -r tempest setup.py
flake8 {posargs}
@@ -280,7 +305,6 @@
[testenv:autopep8]
deps = autopep8
-basepython = python3
commands =
{toxinidir}/tools/format.sh
@@ -304,7 +328,6 @@
import-order-style = pep8
[testenv:releasenotes]
-basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
@@ -316,7 +339,6 @@
whitelist_externals = rm
[testenv:bashate]
-basepython = python3
# if you want to test out some changes you have made to bashate
# against tempest, just set BASHATE_INSTALL_PATH=/path/... to your
# modified bashate tree
@@ -351,7 +373,6 @@
[testenv:plugin-sanity-check]
# perform tempest plugin sanity
-basepython = python3
whitelist_externals = bash
commands =
bash tools/tempest-plugin-sanity.sh