Merge "[Trivial]Remove unused variable assignments"
diff --git a/.gitreview b/.gitreview
index 84b5114..a475594 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
project=openstack/tempest.git
diff --git a/.zuul.yaml b/.zuul.yaml
index 8ff151f..e3210fe 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -8,10 +8,10 @@
test setup. To run a multi-node test inherit from devstack-tempest and
set the nodeset to a multi-node one.
required-projects:
- - git.openstack.org/openstack/tempest
+ - opendev.org/openstack/tempest
timeout: 7200
roles:
- - zuul: git.openstack.org/openstack-dev/devstack
+ - zuul: opendev.org/openstack/devstack
vars:
devstack_services:
tempest: true
@@ -55,10 +55,10 @@
description: |
Base Tempest IPv6 job.
required-projects:
- - git.openstack.org/openstack/tempest
+ - opendev.org/openstack/tempest
timeout: 7200
roles:
- - zuul: git.openstack.org/openstack-dev/devstack
+ - zuul: opendev.org/openstack/devstack
vars:
devstack_services:
tempest: true
@@ -113,24 +113,24 @@
periodic-tempest-dsvm-oslo-latest-full-master.
timeout: 10800
required-projects:
- - git.openstack.org/openstack/oslo.cache
- - git.openstack.org/openstack/oslo.concurrency
- - git.openstack.org/openstack/oslo.config
- - git.openstack.org/openstack/oslo.context
- - git.openstack.org/openstack/oslo.db
- - git.openstack.org/openstack/oslo.i18n
- - git.openstack.org/openstack/oslo.log
- - git.openstack.org/openstack/oslo.messaging
- - git.openstack.org/openstack/oslo.middleware
- - git.openstack.org/openstack/oslo.policy
- - git.openstack.org/openstack/oslo.privsep
- - git.openstack.org/openstack/oslo.reports
- - git.openstack.org/openstack/oslo.rootwrap
- - git.openstack.org/openstack/oslo.serialization
- - git.openstack.org/openstack/oslo.service
- - git.openstack.org/openstack/oslo.utils
- - git.openstack.org/openstack/oslo.versionedobjects
- - git.openstack.org/openstack/oslo.vmware
+ - opendev.org/openstack/oslo.cache
+ - opendev.org/openstack/oslo.concurrency
+ - opendev.org/openstack/oslo.config
+ - opendev.org/openstack/oslo.context
+ - opendev.org/openstack/oslo.db
+ - opendev.org/openstack/oslo.i18n
+ - opendev.org/openstack/oslo.log
+ - opendev.org/openstack/oslo.messaging
+ - opendev.org/openstack/oslo.middleware
+ - opendev.org/openstack/oslo.policy
+ - opendev.org/openstack/oslo.privsep
+ - opendev.org/openstack/oslo.reports
+ - opendev.org/openstack/oslo.rootwrap
+ - opendev.org/openstack/oslo.serialization
+ - opendev.org/openstack/oslo.service
+ - opendev.org/openstack/oslo.utils
+ - opendev.org/openstack/oslo.versionedobjects
+ - opendev.org/openstack/oslo.vmware
- job:
name: tempest-full-parallel
@@ -194,12 +194,8 @@
c-bak: false
- job:
- name: tempest-multinode-full
+ name: tempest-multinode-full-base
parent: devstack-tempest
- nodeset: openstack-two-node-bionic
- # Until the devstack changes are backported, only run this on master
- branches:
- - master
description: |
Base multinode integration test with Neutron networking and py27.
Former names for this job were:
@@ -225,36 +221,36 @@
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic from stable/stein on.
+ branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+ # This job runs on Xenial and is meant for stable/pike, stable/queens
+ # and stable/rocky. It ensures that all stable branches before
+ # stable/stein keep running on Xenial. This job can be removed once
+ # stable/rocky is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+
+- job:
name: tempest-multinode-full-py3
parent: tempest-multinode-full
vars:
devstack_localrc:
USE_PYTHON3: true
-- nodeset:
- name: openstack-bionic-node
- nodes:
- - name: controller
- label: ubuntu-bionic
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
- name: openstack-opensuse150-node
- nodes:
- - name: controller
- label: opensuse-150
- groups:
- - name: tempest
- nodes:
- - controller
-
- job:
name: tempest-full-py3-opensuse150
parent: tempest-full-py3
- nodeset: openstack-opensuse150-node
+ nodeset: devstack-single-node-opensuse-150
description: |
Base integration test with Neutron networking and py36 running
on openSUSE Leap 15.0
@@ -263,8 +259,6 @@
- job:
name: tempest-slow
parent: tempest-multinode-full
- branches:
- - master
description: |
This multinode integration job will run all the tests tagged as slow.
It enables the lvm multibackend setup to cover few scenario tests.
@@ -280,6 +274,13 @@
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
ENABLE_VOLUME_MULTIATTACH: true
tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
- job:
name: tempest-slow-py3
@@ -296,6 +297,16 @@
c-bak: false
- job:
+ name: tempest-full-stein
+ parent: tempest-full
+ override-checkout: stable/stein
+
+- job:
+ name: tempest-full-stein-py3
+ parent: tempest-full-py3
+ override-checkout: stable/stein
+
+- job:
name: tempest-full-rocky
parent: tempest-full
nodeset: openstack-single-node-xenial
@@ -330,7 +341,7 @@
parent: tox
description: |
Run tempest plugin sanity check script using tox.
- nodeset: ubuntu-xenial
+ nodeset: ubuntu-bionic
vars:
tox_envlist: plugin-sanity-check
voting: false
@@ -343,80 +354,82 @@
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
required-projects:
- - git.openstack.org/openstack/airship-tempest-plugin
- - git.openstack.org/openstack/almanach
- - git.openstack.org/openstack/aodh
- - git.openstack.org/openstack/barbican-tempest-plugin
- - git.openstack.org/openstack/blazar-tempest-plugin
- - git.openstack.org/openstack/ceilometer
- - git.openstack.org/openstack/cinder-tempest-plugin
- - git.openstack.org/openstack/cloudkitty-tempest-plugin
- - git.openstack.org/openstack/congress-tempest-plugin
- - git.openstack.org/openstack/designate-tempest-plugin
- - git.openstack.org/openstack/ec2api-tempest-plugin
- - git.openstack.org/openstack/freezer
- - git.openstack.org/openstack/freezer-api
- - git.openstack.org/openstack/freezer-tempest-plugin
- - git.openstack.org/openstack/gabbi-tempest
- - git.openstack.org/openstack/gce-api
- - git.openstack.org/openstack/glare
- - git.openstack.org/openstack/heat-tempest-plugin
- - git.openstack.org/openstack/intel-nfv-ci-tests
- - git.openstack.org/openstack/ironic-tempest-plugin
- - git.openstack.org/openstack/ironic-inspector
- - git.openstack.org/openstack/keystone-tempest-plugin
- - git.openstack.org/openstack/kingbird
- - git.openstack.org/openstack/kuryr-tempest-plugin
- - git.openstack.org/openstack/magnum
- - git.openstack.org/openstack/magnum-tempest-plugin
- - git.openstack.org/openstack/manila
- - git.openstack.org/openstack/manila-tempest-plugin
- - git.openstack.org/openstack/mistral-tempest-plugin
- - git.openstack.org/openstack/mogan
- - git.openstack.org/openstack/monasca-api
- - git.openstack.org/openstack/monasca-log-api
- - git.openstack.org/openstack/monasca-tempest-plugin
- - git.openstack.org/openstack/murano-tempest-plugin
- - git.openstack.org/openstack/networking-ansible
- - git.openstack.org/openstack/networking-bgpvpn
- - git.openstack.org/openstack/networking-cisco
- - git.openstack.org/openstack/networking-fortinet
- - git.openstack.org/openstack/networking-generic-switch
- - git.openstack.org/openstack/networking-l2gw-tempest-plugin
- - git.openstack.org/openstack/networking-midonet
- - git.openstack.org/openstack/networking-sfc
- - git.openstack.org/openstack/networking-spp
- - git.openstack.org/openstack/neutron
- - git.openstack.org/openstack/neutron-dynamic-routing
- - git.openstack.org/openstack/neutron-fwaas
- - git.openstack.org/openstack/neutron-lbaas
- - git.openstack.org/openstack/neutron-tempest-plugin
- - git.openstack.org/openstack/neutron-vpnaas
- - git.openstack.org/openstack/nova-lxd
- - git.openstack.org/openstack/novajoin-tempest-plugin
- - git.openstack.org/openstack/octavia
- - git.openstack.org/openstack/octavia-tempest-plugin
- - git.openstack.org/openstack/oswin-tempest-plugin
- - git.openstack.org/openstack/panko
- - git.openstack.org/openstack/patrole
- - git.openstack.org/openstack/python-watcherclient
- - git.openstack.org/openstack/qinling
- - git.openstack.org/openstack/requirements
- - git.openstack.org/openstack/sahara-tests
- - git.openstack.org/openstack/senlin
- - git.openstack.org/openstack/senlin-tempest-plugin
- - git.openstack.org/openstack/tap-as-a-service
- - git.openstack.org/openstack/telemetry-tempest-plugin
- - git.openstack.org/openstack/tempest-horizon
- - git.openstack.org/openstack/tobiko
- - git.openstack.org/openstack/tripleo-common-tempest-plugin
- - git.openstack.org/openstack/trove-tempest-plugin
- - git.openstack.org/openstack/valet
- - git.openstack.org/openstack/vitrage-tempest-plugin
- - git.openstack.org/openstack/vmware-nsx-tempest-plugin
- - git.openstack.org/openstack/watcher-tempest-plugin
- - git.openstack.org/openstack/zaqar-tempest-plugin
- - git.openstack.org/openstack/zun-tempest-plugin
+ - opendev.org/airship/tempest-plugin
+ - opendev.org/x/almanach
+ - opendev.org/openstack/aodh
+ - opendev.org/openstack/barbican-tempest-plugin
+ - opendev.org/openstack/blazar-tempest-plugin
+ - opendev.org/openstack/ceilometer
+ - opendev.org/openstack/cinder-tempest-plugin
+ - opendev.org/openstack/cloudkitty-tempest-plugin
+ - opendev.org/openstack/congress-tempest-plugin
+ - opendev.org/openstack/designate-tempest-plugin
+ - opendev.org/openstack/ec2api-tempest-plugin
+ - opendev.org/openstack/freezer
+ - opendev.org/openstack/freezer-api
+ - opendev.org/openstack/freezer-tempest-plugin
+ - opendev.org/x/gabbi-tempest
+ - opendev.org/x/gce-api
+ - opendev.org/x/glare
+ - opendev.org/openstack/heat-tempest-plugin
+ - opendev.org/x/intel-nfv-ci-tests
+ - opendev.org/openstack/ironic-tempest-plugin
+ - opendev.org/openstack/ironic-inspector
+ - opendev.org/openstack/keystone-tempest-plugin
+ - opendev.org/x/kingbird
+ - opendev.org/openstack/kuryr-tempest-plugin
+ - opendev.org/openstack/magnum
+ - opendev.org/openstack/magnum-tempest-plugin
+ - opendev.org/openstack/manila
+ - opendev.org/openstack/manila-tempest-plugin
+ - opendev.org/openstack/mistral-tempest-plugin
+ - opendev.org/x/mogan
+ - opendev.org/openstack/monasca-api
+ - opendev.org/openstack/monasca-log-api
+ - opendev.org/openstack/monasca-tempest-plugin
+ - opendev.org/openstack/murano-tempest-plugin
+ - opendev.org/x/networking-ansible
+ - opendev.org/openstack/networking-bgpvpn
+ - opendev.org/x/networking-cisco
+ - opendev.org/x/networking-fortinet
+ - opendev.org/openstack/networking-generic-switch
+ - opendev.org/openstack/networking-l2gw-tempest-plugin
+ - opendev.org/openstack/networking-midonet
+ - opendev.org/openstack/networking-sfc
+ - opendev.org/x/networking-spp
+ - opendev.org/openstack/neutron
+ - opendev.org/openstack/neutron-dynamic-routing
+ - opendev.org/openstack/neutron-fwaas
+ - opendev.org/openstack/neutron-lbaas
+ - opendev.org/openstack/neutron-tempest-plugin
+ - opendev.org/openstack/neutron-vpnaas
+ - opendev.org/x/nova-lxd
+ - opendev.org/x/novajoin-tempest-plugin
+ - opendev.org/openstack/octavia
+ - opendev.org/openstack/octavia-tempest-plugin
+ - opendev.org/openstack/oswin-tempest-plugin
+ - opendev.org/openstack/panko
+ - opendev.org/openstack/patrole
+ - opendev.org/openstack/python-watcherclient
+ - opendev.org/openstack/qinling
+ - opendev.org/openstack/requirements
+ - opendev.org/openstack/sahara-tests
+ - opendev.org/openstack/senlin
+ - opendev.org/openstack/senlin-tempest-plugin
+ - opendev.org/openstack/solum-tempest-plugin
+ - opendev.org/x/tap-as-a-service
+ - opendev.org/openstack/telemetry-tempest-plugin
+ - opendev.org/openstack/tempest-horizon
+ - opendev.org/x/tobiko
+ - opendev.org/x/trio2o
+ - opendev.org/openstack/tripleo-common-tempest-plugin
+ - opendev.org/openstack/trove-tempest-plugin
+ - opendev.org/x/valet
+ - opendev.org/openstack/vitrage-tempest-plugin
+ - opendev.org/x/vmware-nsx-tempest-plugin
+ - opendev.org/openstack/watcher-tempest-plugin
+ - opendev.org/openstack/zaqar-tempest-plugin
+ - opendev.org/openstack/zun-tempest-plugin
- job:
name: tempest-cinder-v2-api
@@ -478,6 +491,7 @@
- openstack-python-jobs
- openstack-python35-jobs
- openstack-python36-jobs
+ - openstack-python37-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
@@ -496,7 +510,6 @@
- tempest-full-parallel:
# Define list of irrelevant files to use everywhere else
irrelevant-files: &tempest-irrelevant-files
- - ^(test-|)requirements.txt$
- ^.*\.rst$
- ^doc/.*$
- ^etc/.*$
@@ -510,6 +523,10 @@
- tempest-full-py3-ipv6:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-stein:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-stein-py3:
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-rocky:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-rocky-py3:
@@ -526,7 +543,6 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-tox-plugin-sanity-check:
irrelevant-files:
- - ^(test-|)requirements.txt$
- ^.*\.rst$
- ^doc/.*$
- ^etc/.*$
@@ -565,8 +581,6 @@
irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
irrelevant-files: *tempest-irrelevant-files
- - legacy-tempest-dsvm-neutron-full-ocata:
- irrelevant-files: *tempest-irrelevant-files
- tempest-full:
irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
@@ -601,7 +615,7 @@
irrelevant-files: *tempest-irrelevant-files
- nova-cells-v1:
irrelevant-files: *tempest-irrelevant-files
- - legacy-tempest-dsvm-nova-v20-api:
+ - nova-tempest-v2-api:
irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-lvm-multibackend:
irrelevant-files: *tempest-irrelevant-files
@@ -613,6 +627,8 @@
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
+ - tempest-full-stein
+ - tempest-full-stein-py3
- tempest-full-rocky
- tempest-full-rocky-py3
- tempest-full-queens
diff --git a/HACKING.rst b/HACKING.rst
index eb6551a..1559fc6 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -28,6 +28,8 @@
- [T117] Check negative tests have ``@decorators.attr(type=['negative'])``
applied.
+It is recommended to use ``tox -eautopep8`` before submitting a patch.
+
Test Data/Configuration
-----------------------
- Assume nothing about existing test data
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 983fa24..4b1c145 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -338,8 +338,8 @@
.. _2.26: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id23
-* `2.28`_
-
+ * `2.28`_
+
.. _2.28: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id25
* `2.32`_
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index e249bdd..ff4fa09 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -128,8 +128,9 @@
people to respond to removal proposals please add things to the agenda by the
Monday before the meeting.
-The other option is to raise the removal on the openstack-dev mailing list.
-(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html )
+The other option is to raise the removal on the openstack-discuss mailing list.
+(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html
+or http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003574.html )
This will raise the issue to the wider community and attract at least the same
(most likely more) attention than discussing it during the irc meeting. The
only downside is that it might take more time to get a response, given the
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index b51e701..5f87abd 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -11,7 +11,7 @@
# This enviroment variable is used by the optional tempest-gabbi
# job provided by the gabbi-tempest plugin. It can be safely ignored
# if that plugin is not being used.
- GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path }}"
+ GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
roles:
- setup-tempest-run-dir
- setup-tempest-data-dir
diff --git a/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml b/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml
new file mode 100644
index 0000000..2245044
--- /dev/null
+++ b/releasenotes/notes/add-profiler-config-options-db7c4ae6d338ee5c.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Add support for the `OSProfiler library`_ for profiling and distributed
+ tracing of OpenStack. A new config option ``key`` in section ``profiler``
+ is added; it sets the secret key used to enable profiling in
+ OpenStack services. The value needs to match the one specified in the
+ ``[profiler]/hmac_keys`` option of the OpenStack services.
+
+ .. _OSProfiler library: https://docs.openstack.org/osprofiler/
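A minimal, illustrative sketch of reading the new option, assuming the
standard oslo.config access pattern exposes it as ``CONF.profiler.key``
(this snippet is not part of the patch itself):

    from tempest import config

    CONF = config.CONF

    # [profiler]/key from tempest.conf; it must match the value configured
    # in [profiler]/hmac_keys of the OpenStack services being profiled.
    # An unset or empty key leaves profiling disabled.
    profiler_key = CONF.profiler.key
    if profiler_key:
        print('OSProfiler tracing is enabled for this tempest run')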
diff --git a/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml b/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml
new file mode 100644
index 0000000..2203fd1
--- /dev/null
+++ b/releasenotes/notes/add-unstable_test-decorator-a73cf97d4ffcc796.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ A new decorator ``unstable_test`` is added to ``tempest.lib.decorators``.
+ It can be used to mark a test as unstable: the test is still run by
+ tempest, but the job does not fail if the test fails; instead, the
+ failing test is reported as skipped.
+ This is useful, for example, when a known bug causes irregular test
+ failures. Marking such a test as unstable lets other developers get
+ their work done while still running the test to gather additional debug
+ data or to confirm whether a potential fix really solved the issue.
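A minimal usage sketch (illustrative only; the test class and bug number
below are hypothetical, and the decorator is assumed to take the bug
reference as a ``bug`` kwarg):

    from tempest.lib import decorators
    from tempest import test


    class ExampleUnstableTest(test.BaseTestCase):

        @decorators.unstable_test(bug='1234567')
        def test_known_flaky_operation(self):
            # If this fails, the test is reported as skipped (pointing at
            # the bug) rather than failing the whole job.
            self.assertTrue(True)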
diff --git a/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml b/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml
new file mode 100644
index 0000000..c280198
--- /dev/null
+++ b/releasenotes/notes/bug-1808473-54ada26ab78e7b02.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixed bug #1808473. The ``tempest run`` CLI now errors if a non-existent
+ config file is passed to the ``--config-file`` parameter. Previously a
+ non-existent config file was silently ignored and the default config
+ file was used instead, misleading the user into thinking the passed
+ config file was in effect.
diff --git a/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
new file mode 100644
index 0000000..c707f14
--- /dev/null
+++ b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The ``tempest.lib.decorators.attr`` decorator now supports a ``condition``
+ kwarg; the attr is applied to the test function only if the condition
+ evaluates to True.
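A minimal usage sketch, mirroring how this change itself uses the kwarg in
``test_attach_volume.py`` (the test class here is hypothetical):

    from tempest import config
    from tempest.lib import decorators
    from tempest import test

    CONF = config.CONF


    class ExampleConditionalAttrTest(test.BaseTestCase):

        # 'slow' is only applied when SSH validation is enabled; otherwise
        # the test keeps its default attrs.
        @decorators.attr(type='slow',
                         condition=CONF.validation.run_validation)
        def test_maybe_slow(self):
            self.assertTrue(True)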
diff --git a/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml b/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml
new file mode 100644
index 0000000..212cf7d
--- /dev/null
+++ b/releasenotes/notes/tempest-stein-release-18bad34136a2e6ef.yaml
@@ -0,0 +1,18 @@
+---
+prelude: >
+ This release tags Tempest for the OpenStack Stein release.
+ It marks the start of Stein release support in Tempest and
+ the end of support for Ocata.
+ After this release, Tempest will support the following OpenStack releases:
+
+ * Stein
+ * Rocky
+ * Queens
+ * Pike
+
+ Current development of Tempest is for the OpenStack Train development
+ cycle. Every Tempest commit is also tested against master during
+ the Train cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Train (or future release)
+ cloud.
+ To be on the safe side, use this tag to test the OpenStack Stein release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 3be014f..e5d5bfe 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ v20.0.0
v19.0.0
v18.0.0
v17.2.0
diff --git a/releasenotes/source/v20.0.0.rst b/releasenotes/source/v20.0.0.rst
new file mode 100644
index 0000000..28c5431
--- /dev/null
+++ b/releasenotes/source/v20.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v20.0.0 Release Notes
+=====================
+
+.. release-notes:: 20.0.0 Release Notes
+ :version: 20.0.0
diff --git a/requirements.txt b/requirements.txt
index 7520d42..bf38fae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
cliff!=2.9.0,>=2.8.0 # Apache-2.0
-jsonschema<3.0.0,>=2.6.0 # MIT
+jsonschema>=2.6.0 # MIT
testtools>=2.2.0 # MIT
paramiko>=2.0.0 # LGPLv2.1+
netaddr>=0.7.18 # BSD
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index cc83c04..371b506 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -142,6 +142,12 @@
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ @classmethod
+ def setup_clients(cls):
+ super(TestMultiAttachVolumeSwap, cls).setup_clients()
+ # Need this to set readonly volumes.
+ cls.admin_volumes_client = cls.os_admin.volumes_client_latest
+
# NOTE(mriedem): This is an uncommon scenario to call the compute API
# to swap volumes directly; swap volume is primarily only for volume
# live migration and retype callbacks from the volume service, and is slow
@@ -162,6 +168,13 @@
# volumes cleanup can happen successfully irrespective of which volume
# is attached to server.
volume1 = self.create_volume(multiattach=True)
+ # Make volume1 read-only since you can't swap from a volume with
+ # multiple read/write attachments, and you can't change the readonly
+ # flag on an in-use volume so we have to do this before attaching
+ # volume1 to anything. If the compute API ever supports per-attachment
+ # attach modes, then we can handle this differently.
+ self.admin_volumes_client.update_volume_readonly(
+ volume1['id'], readonly=True)
volume2 = self.create_volume(multiattach=True)
# Create two servers and wait for them to be ACTIVE.
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index bea23d9..eeb58d6 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -331,6 +331,16 @@
@decorators.idempotent_id('c7e0e60b-ee45-43d0-abeb-8596fd42a2f9')
@utils.services('network')
def test_add_remove_fixed_ip(self):
+ # NOTE(zhufl) By default only a project that is admin or the network
+ # owner, or a project with the advsvc role, is authorised to add
+ # interfaces with a fixed IP, so skip the test unless we create a
+ # network for each project.
+ if not (CONF.auth.use_dynamic_credentials and
+ CONF.auth.create_isolated_networks and
+ not CONF.network.shared_physical_network):
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
+
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
original_interface_count = len(ifs) # This is the number of ports.
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index d40f937..e817587 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from oslo_log import log as logging
+from oslo_serialization import jsonutils as json
from tempest.api.compute import base
from tempest.common import utils
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 5801db1..daf6a06 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -143,7 +143,7 @@
data_length = len(data) if data is not None else 0
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24),
+ data[20:24])[0] + 24),
'Server initialization was not the right format.')
# Since the rest of the data on the screen is arbitrary, we will
# close the socket and end our validation of the data at this point
@@ -151,7 +151,7 @@
# initialization was the right format
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24))
+ data[20:24])[0] + 24))
def _validate_websocket_upgrade(self):
self.assertTrue(
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f3d7476..f6c3e73 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -92,6 +92,7 @@
validatable=True,
validation_resources=validation_resources,
wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, newserver['id'])
# The server's password should be set to the provided password
new_password = 'Newpass1234'
self.client.change_password(newserver['id'], adminPass=new_password)
@@ -288,6 +289,17 @@
self.assertEqual('in-use', vol_after_rebuild['status'])
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
+ if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.ssh_user,
+ password=None,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.client)
+ linux_client.validate_authentication()
def _test_resize_server_confirm(self, server_id, stop=False):
# The server's RAM and disk space should be modified to that of
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 56d973e..e8b1161 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -19,7 +19,6 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -40,11 +39,7 @@
# If an admin password is provided on server creation, the server's
# root password should be set to that password.
server = self.create_test_server(adminPass='testpassword')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Verify the password is set correctly in the response
self.assertEqual('testpassword', server['adminPass'])
@@ -59,19 +54,11 @@
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id1 = server['id']
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, id1)
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, id1)
+ self.addCleanup(self.delete_server, id1)
server = self.create_test_server(name=server_name,
wait_until='ACTIVE')
id2 = server['id']
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, id2)
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, id2)
+ self.addCleanup(self.delete_server, id2)
self.assertNotEqual(id1, id2, "Did not create a new server")
server = self.client.show_server(id1)['server']
name1 = server['name']
@@ -87,13 +74,9 @@
self.keypairs_client.create_keypair(name=key_name)
self.addCleanup(self.keypairs_client.delete_keypair, key_name)
self.keypairs_client.list_keypairs()
- server = self.create_test_server(key_name=key_name)
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
- waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
+ server = self.create_test_server(key_name=key_name,
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server['id'])
server = self.client.show_server(server['id'])['server']
self.assertEqual(key_name, server['key_name'])
@@ -115,11 +98,7 @@
def test_update_server_name(self):
# The server name should be changed to the provided value
server = self.create_test_server(wait_until='ACTIVE')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Update instance name with non-ASCII characters
prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
self._update_server_name(server['id'], 'ACTIVE', prefix_name)
@@ -137,11 +116,7 @@
def test_update_access_server_address(self):
# The server's access addresses should reflect the provided values
server = self.create_test_server(wait_until='ACTIVE')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# Update the IPv4 and IPv6 access addresses
self.client.update_server(server['id'],
@@ -157,13 +132,9 @@
@decorators.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
def test_create_server_with_ipv6_addr_only(self):
# Create a server without an IPv4 address(only IPv6 address).
- server = self.create_test_server(accessIPv6='2001:2001::3')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.servers_client.delete_server, server['id'])
- waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
+ server = self.create_test_server(accessIPv6='2001:2001::3',
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, server['id'])
server = self.client.show_server(server['id'])['server']
self.assertEqual('2001:2001::3', server['accessIPv6'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index f7b5b4b..f83e62c 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -65,6 +65,8 @@
class AttachVolumeTestJSON(BaseAttachVolumeTest):
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
+ # This test is conditionally marked slow if SSH validation is enabled.
+ @decorators.attr(type='slow', condition=CONF.validation.run_validation)
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
@@ -124,7 +126,7 @@
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
# List volume attachment of the server
- server, _ = self._create_server()
+ server, validation_resources = self._create_server()
volume_1st = self.create_volume()
attachment_1st = self.attach_volume(server, volume_1st)
body = self.servers_client.list_volume_attachments(
@@ -147,6 +149,16 @@
server['id'])['volumeAttachments']
self.assertEqual(2, len(body))
+ if CONF.validation.run_validation:
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.image_ssh_user,
+ self.image_ssh_password,
+ validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+ linux_client.validate_authentication()
+
for attachment in [attachment_1st, attachment_2nd]:
body = self.servers_client.show_volume_attachment(
server['id'], attachment['id'])['volumeAttachment']
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index a79cbc3..73fddb7 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -21,7 +21,7 @@
CONF = config.CONF
-class TestDefaultProjectId (base.BaseIdentityV3AdminTest):
+class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
@classmethod
def setup_credentials(cls):
@@ -57,9 +57,10 @@
# create a user in the domain, with the previous project as his
# default project
user_name = data_utils.rand_name('user')
+ user_pass = data_utils.rand_password()
user_body = self.users_client.create_user(
name=user_name,
- password=user_name,
+ password=user_pass,
domain_id=dom_id,
default_project_id=proj_id)['user']
user_id = user_body['id']
@@ -78,7 +79,7 @@
# create a new client with user's credentials (NOTE: unscoped token!)
creds = auth.KeystoneV3Credentials(username=user_name,
- password=user_name,
+ password=user_pass,
user_domain_name=dom_name)
auth_provider = clients.get_auth_provider(creds)
creds = auth_provider.fill_credentials()
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 72b6be4..07175f4 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -153,18 +153,3 @@
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
self.assertDictContainsSubset(expected_data, domain)
-
-
-class DefaultDomainTestJSON(base.BaseIdentityV3AdminTest):
-
- @classmethod
- def resource_setup(cls):
- cls.domain_id = CONF.identity.default_domain_id
- super(DefaultDomainTestJSON, cls).resource_setup()
-
- @decorators.attr(type='smoke')
- @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
- def test_default_domain_exists(self):
- domain = self.domains_client.show_domain(self.domain_id)['domain']
-
- self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 625568d..7d85dc9 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -69,6 +69,7 @@
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
def test_create_list_show_check_delete_endpoint_group(self):
service_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service_id}
@@ -129,6 +130,7 @@
# Creating an endpoint group so as to check update endpoint group
# with new values
service1_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service1_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service1_id}
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 68c0225..2672f71 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -9,14 +9,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
from tempest.common import utils
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class InheritsV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def skip_checks(cls):
@@ -30,7 +38,7 @@
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_name('pass-')
+ u_password = data_utils.rand_password()
cls.domain = cls.create_domain()
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project-'),
@@ -43,18 +51,26 @@
domain_id=cls.domain['id'])['group']
cls.addClassResourceCleanup(cls.groups_client.delete_group,
cls.group['id'])
- cls.user = cls.users_client.create_user(
- name=u_name, description=u_desc, password=u_password,
- email=u_email, project_id=cls.project['id'],
- domain_id=cls.domain['id'])['user']
- cls.addClassResourceCleanup(cls.users_client.delete_user,
- cls.user['id'])
+ if not CONF.identity_feature_enabled.immutable_user_source:
+ cls.user = cls.users_client.create_user(
+ name=u_name,
+ description=u_desc,
+ password=u_password,
+ email=u_email,
+ project_id=cls.project['id'],
+ domain_id=cls.domain['id']
+ )['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
@decorators.idempotent_id('4e6f0366-97c8-423c-b2be-41eae6ac91c8')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
# Create role
src_role = self.setup_test_role()
@@ -103,6 +119,9 @@
self.domain['id'], self.group['id'], src_role['id'])
@decorators.idempotent_id('18b70e45-7687-4b72-8277-b8f1a47d7591')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_check_revoke_roles_on_projects_user(self):
# Create role
src_role = self.setup_test_role()
@@ -134,6 +153,9 @@
self.project['id'], self.group['id'], src_role['id']))
@decorators.idempotent_id('3acf666e-5354-42ac-8e17-8b68893bcd36')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_domain(self):
# Create role
src_role = self.setup_test_role()
@@ -178,6 +200,9 @@
self.assertEmpty(assignments)
@decorators.idempotent_id('9f02ccd9-9b57-46b4-8f77-dd5a736f3a06')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_project_tree(self):
# Create role
src_role = self.setup_test_role()
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 50f3186..299a618 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -13,11 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
+
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -26,6 +29,9 @@
def _list_projects_with_params(self, included, excluded, params, key):
# Validate that projects in ``included`` belongs to the projects
# returned that match ``params`` but not projects in ``excluded``
+ all_projects = self.projects_client.list_projects()['projects']
+ LOG.debug("Complete list of projects available in keystone: %s",
+ all_projects)
body = self.projects_client.list_projects(params)['projects']
for p in included:
self.assertIn(p[key], map(lambda x: x[key], body))
@@ -39,13 +45,12 @@
def resource_setup(cls):
super(ListProjectsTestJSON, cls).resource_setup()
cls.project_ids = list()
- # Create a domain
- cls.domain = cls.create_domain()
+ cls.domain_id = cls.os_admin.credentials.domain_id
# Create project with domain
cls.p1_name = data_utils.rand_name('project')
cls.p1 = cls.projects_client.create_project(
cls.p1_name, enabled=False,
- domain_id=cls.domain['id'])['project']
+ domain_id=cls.domain_id)['project']
cls.addClassResourceCleanup(cls.projects_client.delete_project,
cls.p1['id'])
cls.project_ids.append(cls.p1['id'])
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 47f663c..5ba4c9f 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -12,15 +12,23 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
+CONF = config.CONF
+
class RolesV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -48,16 +56,21 @@
domain_id=cls.domain['id'])['group']
cls.addClassResourceCleanup(cls.groups_client.delete_group,
cls.group_body['id'])
- cls.user_body = cls.users_client.create_user(
- name=u_name, description=u_desc, password=cls.u_password,
- email=u_email, project_id=cls.project['id'],
- domain_id=cls.domain['id'])['user']
- cls.addClassResourceCleanup(cls.users_client.delete_user,
- cls.user_body['id'])
cls.role = cls.roles_client.create_role(
name=data_utils.rand_name('Role'))['role']
cls.addClassResourceCleanup(cls.roles_client.delete_role,
cls.role['id'])
+ if not CONF.identity_feature_enabled.immutable_user_source:
+ cls.user_body = cls.users_client.create_user(
+ name=u_name,
+ description=u_desc,
+ email=u_email,
+ password=cls.u_password,
+ domain_id=cls.domain['id'],
+ project_id=cls.project['id']
+ )['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user_body['id'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
@@ -84,6 +97,9 @@
self.assertIn(role['id'], [r['id'] for r in roles])
@decorators.idempotent_id('c6b80012-fe4a-498b-9ce8-eb391c05169f')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_user_on_project(self):
self.roles_client.create_user_role_on_project(self.project['id'],
self.user_body['id'],
@@ -102,6 +118,9 @@
self.project['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('6c9a2940-3625-43a3-ac02-5dcec62ef3bd')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_user_on_domain(self):
self.roles_client.create_user_role_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
@@ -119,6 +138,9 @@
self.domain['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_grant_list_revoke_role_to_group_on_project(self):
# Grant role to group on project
self.roles_client.create_group_role_on_project(
@@ -254,6 +276,9 @@
self.assertIn(self.roles[2]['id'], implies_ids)
@decorators.idempotent_id('c8828027-df48-4021-95df-b65b92c7429e')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_assignments_for_implied_roles_create_delete(self):
# Create a grant using "roles[0]"
self.roles_client.create_user_role_on_project(
@@ -344,6 +369,9 @@
domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_assignments_for_domain_roles(self):
domain_role = self.setup_test_role(domain_id=self.domain['id'])
diff --git a/tempest/api/identity/v3/test_domains.py b/tempest/api/identity/v3/test_domains.py
new file mode 100644
index 0000000..9f132dd
--- /dev/null
+++ b/tempest/api/identity/v3/test_domains.py
@@ -0,0 +1,39 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class DefaultDomainTestJSON(base.BaseIdentityV3Test):
+
+ @classmethod
+ def setup_clients(cls):
+ super(DefaultDomainTestJSON, cls).setup_clients()
+ cls.domains_client = cls.os_primary.domains_client
+
+ @classmethod
+ def resource_setup(cls):
+ super(DefaultDomainTestJSON, cls).resource_setup()
+ cls.domain_id = CONF.identity.default_domain_id
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
+ def test_default_domain_exists(self):
+ domain = self.domains_client.show_domain(self.domain_id)['domain']
+ self.assertTrue(domain['enabled'])
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
deleted file mode 100644
index 033bf55..0000000
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.network import base
-from tempest.common import utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-AGENT_TYPE = 'L3 agent'
-AGENT_MODES = (
- 'legacy',
- 'dvr_snat'
-)
-
-
-class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
- """Tests the following operations in the Neutron API:
-
- List routers that the given L3 agent is hosting.
- List L3 agents hosting the given router.
- Add and Remove Router to L3 agent
-
- v2.0 of the Neutron API is assumed.
-
- The l3_agent_scheduler extension is required for these tests.
- """
-
- @classmethod
- def skip_checks(cls):
- super(L3AgentSchedulerTestJSON, cls).skip_checks()
- if not utils.is_extension_enabled('l3_agent_scheduler', 'network'):
- msg = "L3 Agent Scheduler Extension not enabled."
- raise cls.skipException(msg)
-
- @classmethod
- def resource_setup(cls):
- super(L3AgentSchedulerTestJSON, cls).resource_setup()
- agents = cls.admin_agents_client.list_agents(
- agent_type=AGENT_TYPE)['agents']
- for agent in agents:
- if (agent['configurations']['agent_mode'] in AGENT_MODES and
- agent['alive']):
- cls.agent = agent
- break
- else:
- msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
- raise exceptions.InvalidConfiguration(msg)
- cls.router = cls.create_router()
-
- @decorators.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
- def test_list_routers_on_l3_agent(self):
- self.admin_agents_client.list_routers_on_l3_agent(self.agent['id'])
-
- @decorators.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
- def test_add_list_remove_router_on_l3_agent(self):
- l3_agent_ids = list()
- self.admin_agents_client.create_router_on_l3_agent(
- self.agent['id'],
- router_id=self.router['id'])
- body = (
- self.admin_routers_client.list_l3_agents_hosting_router(
- self.router['id']))
- for agent in body['agents']:
- l3_agent_ids.append(agent['id'])
- self.assertIn('agent_type', agent)
- self.assertEqual('L3 agent', agent['agent_type'])
- self.assertIn(self.agent['id'], l3_agent_ids)
- body = self.admin_agents_client.delete_router_from_l3_agent(
- self.agent['id'],
- self.router['id'])
- # NOTE(afazekas): The deletion not asserted, because neutron
- # is not forbidden to reschedule the router to the same agent
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index a075b51..9d1e2a7 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -30,7 +30,7 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
@classmethod
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index b1e4a58..ef5ebb6 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -35,7 +35,7 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
@classmethod
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 3ab2909..eb31ed3 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -206,7 +206,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self.ports_client.delete_port(port['id'])
self.ports.pop()
body = self.ports_client.list_ports()
@@ -257,7 +257,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7345fd1..ed8eb52 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -317,7 +317,7 @@
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
- 'dns_nameservers',
+ 'dns_nameservers',
'allocation_pools']))
subnet_id = subnet['id']
new_gateway = str(netaddr.IPAddress(
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 2c9159c..25976ce 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -107,7 +107,7 @@
address = self.cidr
address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
- (address.version == 6 and address.prefixlen >= 126)):
+ (address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 1c56eb2..9136139 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -36,7 +36,7 @@
# process is finished.
fetched_list = self.admin_volume_client.list_volumes(
params={'all_tenants': True,
- 'display_name': vol['name']})['volumes']
+ 'name': vol['name']})['volumes']
for fetched_vol in fetched_list:
if fetched_vol['id'] != vol['id']:
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index c178272..6ce5d3e 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -50,6 +50,7 @@
'available')
return restored_volume
+ @decorators.skip_because(bug="1483434")
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'ceph does not support arbitrary container names')
@decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
diff --git a/tempest/clients.py b/tempest/clients.py
index e5d5be1..0506646 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,6 +44,7 @@
self._set_object_storage_clients()
self._set_image_clients()
self._set_network_clients()
+ self.placement_client = self.placement.PlacementClient()
# TODO(andreaf) This is maintained for backward compatibility
# with plugins, but it should removed eventually, since it was
# never a stable interface and it's not useful anyways
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index f6af0ba..e6db2e9 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -310,8 +310,8 @@
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
def _load_json(self, saved_state_json=SAVED_STATE_JSON):
try:
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 6b6a59b..104958a 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -158,7 +158,7 @@
try:
client.delete_snapshot(snap['id'])
except Exception:
- LOG.exception("Delete Snapshot exception.")
+ LOG.exception("Delete Snapshot %s exception.", snap['id'])
def dry_run(self):
snaps = self.list()
@@ -195,7 +195,7 @@
try:
client.delete_server(server['id'])
except Exception:
- LOG.exception("Delete Server exception.")
+ LOG.exception("Delete Server %s exception.", server['id'])
def dry_run(self):
servers = self.list()
@@ -227,7 +227,7 @@
try:
client.delete_server_group(sg['id'])
except Exception:
- LOG.exception("Delete Server Group exception.")
+ LOG.exception("Delete Server Group %s exception.", sg['id'])
def dry_run(self):
sgs = self.list()
@@ -260,11 +260,11 @@
client = self.client
keypairs = self.list()
for k in keypairs:
+ name = k['keypair']['name']
try:
- name = k['keypair']['name']
client.delete_keypair(name)
except Exception:
- LOG.exception("Delete Keypairs exception.")
+ LOG.exception("Delete Keypair %s exception.", name)
def dry_run(self):
keypairs = self.list()
@@ -300,7 +300,7 @@
try:
client.delete_volume(v['id'])
except Exception:
- LOG.exception("Delete Volume exception.")
+ LOG.exception("Delete Volume %s exception.", v['id'])
def dry_run(self):
vols = self.list()
@@ -323,7 +323,8 @@
try:
client.delete_quota_set(self.project_id)
except Exception:
- LOG.exception("Delete Volume Quotas exception.")
+ LOG.exception("Delete Volume Quotas exception for 'project %s'.",
+ self.project_id)
def dry_run(self):
quotas = self.client.show_quota_set(
@@ -342,7 +343,8 @@
try:
client.delete_quota_set(self.project_id)
except Exception:
- LOG.exception("Delete Quotas exception.")
+ LOG.exception("Delete Quotas exception for 'project %s'.",
+ self.project_id)
def dry_run(self):
client = self.limits_client
@@ -362,6 +364,7 @@
self.metering_label_rules_client = manager.metering_label_rules_client
self.security_groups_client = manager.security_groups_client
self.routers_client = manager.routers_client
+ self.subnetpools_client = manager.subnetpools_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
@@ -396,7 +399,7 @@
try:
client.delete_network(n['id'])
except Exception:
- LOG.exception("Delete Network exception.")
+ LOG.exception("Delete Network %s exception.", n['id'])
def dry_run(self):
networks = self.list()
@@ -430,7 +433,8 @@
try:
client.delete_floatingip(flip['id'])
except Exception:
- LOG.exception("Delete Network Floating IP exception.")
+ LOG.exception("Delete Network Floating IP %s exception.",
+ flip['id'])
def dry_run(self):
flips = self.list()
@@ -466,16 +470,20 @@
ports_client = self.ports_client
routers = self.list()
for router in routers:
- try:
- rid = router['id']
- ports = [port for port
- in ports_client.list_ports(device_id=rid)['ports']
- if net_info.is_router_interface_port(port)]
- for port in ports:
+ rid = router['id']
+ ports = [port for port
+ in ports_client.list_ports(device_id=rid)['ports']
+ if net_info.is_router_interface_port(port)]
+ for port in ports:
+ try:
client.remove_router_interface(rid, port_id=port['id'])
+ except Exception:
+ LOG.exception("Delete Router Interface exception for "
+ "'port %s' of 'router %s'.", port['id'], rid)
+ try:
client.delete_router(rid)
except Exception:
- LOG.exception("Delete Router exception.")
+ LOG.exception("Delete Router %s exception.", rid)
def dry_run(self):
routers = self.list()
@@ -510,7 +518,8 @@
try:
client.delete_metering_label_rule(rule['id'])
except Exception:
- LOG.exception("Delete Metering Label Rule exception.")
+ LOG.exception("Delete Metering Label Rule %s exception.",
+ rule['id'])
def dry_run(self):
rules = self.list()
@@ -545,7 +554,8 @@
try:
client.delete_metering_label(label['id'])
except Exception:
- LOG.exception("Delete Metering Label exception.")
+ LOG.exception("Delete Metering Label %s exception.",
+ label['id'])
def dry_run(self):
labels = self.list()
@@ -584,7 +594,7 @@
try:
client.delete_port(port['id'])
except Exception:
- LOG.exception("Delete Port exception.")
+ LOG.exception("Delete Port %s exception.", port['id'])
def dry_run(self):
ports = self.list()
@@ -625,7 +635,8 @@
try:
client.delete_security_group(secgroup['id'])
except Exception:
- LOG.exception("Delete security_group exception.")
+ LOG.exception("Delete security_group %s exception.",
+ secgroup['id'])
def dry_run(self):
secgroups = self.list()
@@ -660,7 +671,7 @@
try:
client.delete_subnet(subnet['id'])
except Exception:
- LOG.exception("Delete Subnet exception.")
+ LOG.exception("Delete Subnet %s exception.", subnet['id'])
def dry_run(self):
subnets = self.list()
@@ -673,6 +684,41 @@
self.data['subnets'][subnet['id']] = subnet['name']
+class NetworkSubnetPoolsService(BaseNetworkService):
+
+ def list(self):
+ client = self.subnetpools_client
+ pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
+ if not self.is_save_state:
+ # recreate list removing saved subnet pools
+ pools = [pool for pool in pools if pool['id']
+ not in self.saved_state_json['subnetpools'].keys()]
+ if self.is_preserve:
+ pools = [pool for pool in pools if pool['project_id']
+ not in CONF_PROJECTS]
+ LOG.debug("List count, %s Subnet Pools", len(pools))
+ return pools
+
+ def delete(self):
+ client = self.subnetpools_client
+ pools = self.list()
+ for pool in pools:
+ try:
+ client.delete_subnetpool(pool['id'])
+ except Exception:
+ LOG.exception("Delete Subnet Pool %s exception.", pool['id'])
+
+ def dry_run(self):
+ pools = self.list()
+ self.data['subnetpools'] = pools
+
+ def save_state(self):
+ pools = self.list()
+ self.data['subnetpools'] = {}
+ for pool in pools:
+ self.data['subnetpools'][pool['id']] = pool['name']
+
+
# begin global services
class FlavorService(BaseService):
def __init__(self, manager, **kwargs):
@@ -700,7 +746,7 @@
try:
client.delete_flavor(flavor['id'])
except Exception:
- LOG.exception("Delete Flavor exception.")
+ LOG.exception("Delete Flavor %s exception.", flavor['id'])
def dry_run(self):
flavors = self.list()
@@ -737,7 +783,7 @@
try:
client.delete_image(image['id'])
except Exception:
- LOG.exception("Delete Image exception.")
+ LOG.exception("Delete Image %s exception.", image['id'])
def dry_run(self):
images = self.list()
@@ -780,7 +826,7 @@
try:
self.client.delete_user(user['id'])
except Exception:
- LOG.exception("Delete User exception.")
+ LOG.exception("Delete User %s exception.", user['id'])
def dry_run(self):
users = self.list()
@@ -820,7 +866,7 @@
try:
self.client.delete_role(role['id'])
except Exception:
- LOG.exception("Delete Role exception.")
+ LOG.exception("Delete Role %s exception.", role['id'])
def dry_run(self):
roles = self.list()
@@ -862,7 +908,7 @@
try:
self.client.delete_project(project['id'])
except Exception:
- LOG.exception("Delete project exception.")
+ LOG.exception("Delete project %s exception.", project['id'])
def dry_run(self):
projects = self.list()
@@ -899,7 +945,7 @@
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
except Exception:
- LOG.exception("Delete Domain exception.")
+ LOG.exception("Delete Domain %s exception.", domain['id'])
def dry_run(self):
domains = self.list()
@@ -931,6 +977,7 @@
project_services.append(NetworkSubnetService)
project_services.append(NetworkService)
project_services.append(NetworkSecGroupService)
+ project_services.append(NetworkSubnetPoolsService)
if IS_CINDER:
project_services.append(SnapshotService)
project_services.append(VolumeService)
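
For reference, the cleanup-service changes above all apply the same pattern: resolve the resource identifier before the try block so the except handler can still name the failed resource, then log it and continue with the remaining resources. A minimal illustrative sketch, with a hypothetical delete_thing() call standing in for the real service clients:

    import logging

    LOG = logging.getLogger(__name__)


    def delete_all(client, items):
        for item in items:
            # Resolve the id outside the try block so the except handler
            # can still reference it if the delete call raises.
            item_id = item['id']
            try:
                client.delete_thing(item_id)  # hypothetical client call
            except Exception:
                # Name the failing resource and keep going with the rest.
                LOG.exception("Delete Thing %s exception.", item_id)
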
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 3e84b82..77d4496 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -103,6 +103,9 @@
from tempest.common import credentials_factory as credentials
from tempest import config
+if six.PY2:
+ # Python 2 does not have the FileNotFoundError exception
+ FileNotFoundError = IOError
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
@@ -112,7 +115,12 @@
def _set_env(self, config_file=None):
if config_file:
- CONF.set_config_path(os.path.abspath(config_file))
+ if os.path.exists(os.path.abspath(config_file)):
+ CONF.set_config_path(os.path.abspath(config_file))
+ else:
+ raise FileNotFoundError(
+ "Config file: %s doesn't exist" % config_file)
+
# NOTE(mtreinish): This is needed so that stestr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
@@ -202,8 +210,8 @@
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
@@ -252,6 +260,7 @@
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
+ type=int, default=0,
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
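
For reference, the new config-file check raises FileNotFoundError, which does not exist on Python 2, hence the IOError alias above. A small illustrative sketch of why the alias keeps the same raise working on both interpreters (on Python 3, FileNotFoundError already subclasses OSError/IOError):

    import os

    import six

    if six.PY2:
        # Python 2 has no FileNotFoundError; alias it to IOError so the
        # same raise statement works on both interpreters.
        FileNotFoundError = IOError


    def check_config(path):
        if not os.path.exists(os.path.abspath(path)):
            raise FileNotFoundError("Config file: %s doesn't exist" % path)


    try:
        check_config('/no/such/tempest.conf')
    except IOError:
        # Caught on Python 2 (IOError) and on Python 3, where
        # FileNotFoundError subclasses OSError (== IOError).
        pass
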
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 8dcf575..081fa7a 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -78,11 +78,11 @@
import argparse
import collections
import io
-import json
import os
import re
import sys
+from oslo_serialization import jsonutils as json
import subunit
import testtools
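
For reference, this import swap (and the similar ones below) replaces the stdlib json module with oslo.serialization's jsonutils under the same alias; jsonutils exposes the familiar dumps/loads interface, so the rest of each module is unchanged. A minimal illustrative sketch:

    from oslo_serialization import jsonutils as json

    # jsonutils mirrors the stdlib dumps/loads API, making the aliased
    # import a drop-in replacement in the modules touched here.
    payload = json.dumps({'status': 'ACTIVE'}, sort_keys=True)
    assert json.loads(payload) == {'status': 'ACTIVE'}
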
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index 525110b..cd6d058 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -26,7 +26,7 @@
if project['name'] == project_name:
return project
raise lib_exc.NotFound('No such project(%s) in %s' % (project_name,
- projects))
+ projects))
def get_tenant_by_name(client, tenant_name):
diff --git a/tempest/common/utils/net_utils.py b/tempest/common/utils/net_utils.py
index 867b3dd..b697ef1 100644
--- a/tempest/common/utils/net_utils.py
+++ b/tempest/common/utils/net_utils.py
@@ -19,7 +19,6 @@
def get_unused_ip_addresses(ports_client, subnets_client,
network_id, subnet_id, count):
-
"""Return a list with the specified number of unused IP addresses
This method uses the given ports_client to find the specified number of
diff --git a/tempest/config.py b/tempest/config.py
index fbe18a3..24ae3ae 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1100,6 +1100,18 @@
""")
]
+
+profiler_group = cfg.OptGroup(name="profiler",
+ title="OpenStack Profiler")
+
+ProfilerGroup = [
+ cfg.StrOpt('key',
+ help="The secret key to enable OpenStack Profiler. The value "
+ "should match the one configured in OpenStack services "
+ "under `[profiler]/hmac_keys` property. The default empty "
+ "value keeps profiling disabled"),
+]
+
DefaultGroup = [
cfg.BoolOpt('pause_teardown',
default=False,
@@ -1132,6 +1144,7 @@
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
(placement_group, PlacementGroup),
+ (profiler_group, ProfilerGroup),
(None, DefaultGroup)
]
@@ -1249,7 +1262,7 @@
logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
if ((not hasattr(_CONF, 'log_config_append') or
- _CONF.log_config_append is None) and
+ _CONF.log_config_append is None) and
os.path.isfile(logging_cfg_path)):
# if logging conf is in place we need to set log_config_append
_CONF.log_config_append = logging_cfg_path
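
For reference, the new [profiler] group follows the usual oslo.config pattern: an OptGroup plus a list of options that is later registered against the configuration object, which is what the (profiler_group, ProfilerGroup) entry in the registration list provides. A standalone illustrative sketch of the pattern, using a plain ConfigOpts instance rather than tempest's CONF:

    from oslo_config import cfg

    profiler_group = cfg.OptGroup(name='profiler', title='OpenStack Profiler')
    profiler_opts = [
        cfg.StrOpt('key', help='Secret key used to enable profiling.'),
    ]

    conf = cfg.ConfigOpts()
    conf.register_group(profiler_group)
    conf.register_opts(profiler_opts, group=profiler_group)
    conf([])  # process defaults; no config file given

    # Unset by default, so profiling stays disabled until an operator
    # sets key under the [profiler] section of the config file.
    assert conf.profiler.key is None
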
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index ac40eef..71ecb32 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -110,7 +110,7 @@
for item in files:
if item.endswith('.py'):
module_name = '.'.join((root_package,
- os.path.splitext(item)[0]))
+ os.path.splitext(item)[0]))
if not module_name.startswith(UNIT_TESTS_EXCLUDE):
modules.append(module_name)
return modules
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index bcb076b..d29362d 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
import testtools
from tempest.lib.common import api_version_request
@@ -54,7 +55,7 @@
config_min_version = api_version_request.APIVersionRequest(cfg_min_version)
config_max_version = api_version_request.APIVersionRequest(cfg_max_version)
if ((min_version > max_version) or
- (config_min_version > config_max_version)):
+ (config_min_version > config_max_version)):
msg = ("Test Class versions [%s - %s]. "
"Configuration versions [%s - %s]."
% (min_version.get_string(),
@@ -108,10 +109,12 @@
:param api_microversion_header_name: Microversion header name
Example- "X-OpenStack-Nova-API-Version"
- :param api_microversion: Microversion number like "2.10"
+ :param api_microversion: Microversion number like "2.10", type str.
:param response_header: Response header where microversion is
expected to be present.
"""
+ if not isinstance(api_microversion, six.string_types):
+ raise TypeError('api_microversion must be a string')
api_microversion_header_name = api_microversion_header_name.lower()
if (api_microversion_header_name not in response_header or
api_microversion != response_header[api_microversion_header_name]):
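
For reference, with this change the expected microversion must be passed as a string such as "2.10"; any other type now raises TypeError before the header comparison. An illustrative sketch, assuming the surrounding helper is tempest.lib.common.api_version_utils.assert_version_header_matches_request (the function name is not visible in this hunk):

    from tempest.lib.common import api_version_utils

    headers = {'x-openstack-nova-api-version': '2.10'}

    # A string microversion is compared against the response header.
    api_version_utils.assert_version_header_matches_request(
        'X-OpenStack-Nova-API-Version', '2.10', headers)

    # A non-string value (e.g. the float 2.10) now fails fast with
    # TypeError instead of silently mismatching the header string.
    try:
        api_version_utils.assert_version_header_matches_request(
            'X-OpenStack-Nova-API-Version', 2.10, headers)
    except TypeError:
        pass
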
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index fcdeb17..1011504 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -273,7 +273,7 @@
# NOTE(andreaf) Not all fields may be available on all credentials
# so defaulting to None for that case.
if all([getattr(creds, k, None) == hash_attributes.get(k, None) for
- k in init_attributes]):
+ k in init_attributes]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
diff --git a/tempest/lib/common/profiler.py b/tempest/lib/common/profiler.py
new file mode 100644
index 0000000..1544337
--- /dev/null
+++ b/tempest/lib/common/profiler.py
@@ -0,0 +1,64 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import hashlib
+import hmac
+import json
+
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+
+_profiler = {}
+
+
+def enable(profiler_key, trace_id=None):
+ """Enable global profiler instance
+
+ :param profiler_key: the secret key used to enable profiling in services
+ :param trace_id: unique id of the trace, if empty the id is generated
+ automatically
+ """
+ _profiler['key'] = profiler_key
+ _profiler['uuid'] = trace_id or uuidutils.generate_uuid()
+
+
+def disable():
+ """Disable global profiler instance"""
+ _profiler.clear()
+
+
+def serialize_as_http_headers():
+ """Serialize profiler state as HTTP headers
+
+ This function corresponds to the one from the osprofiler library.
+ :return: dictionary with 2 keys `X-Trace-Info` and `X-Trace-HMAC`.
+ """
+ p = _profiler
+ if not p: # profiler is not enabled
+ return {}
+
+ info = {'base_id': p['uuid'], 'parent_id': p['uuid']}
+ trace_info = base64.urlsafe_b64encode(
+ encodeutils.to_utf8(json.dumps(info)))
+ trace_hmac = _sign(trace_info, p['key'])
+
+ return {
+ 'X-Trace-Info': trace_info,
+ 'X-Trace-HMAC': trace_hmac,
+ }
+
+
+def _sign(trace_info, key):
+ h = hmac.new(encodeutils.to_utf8(key), digestmod=hashlib.sha1)
+ h.update(trace_info)
+ return h.hexdigest()
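
For reference, a minimal illustrative usage sketch of the new helper, with the trace id supplied explicitly so the output is deterministic:

    from tempest.lib.common import profiler

    # Enable profiling with the secret shared with the services under
    # [profiler]/hmac_keys; a fixed trace id is passed for the example.
    profiler.enable('SECRET_KEY',
                    trace_id='6b0a1cce-fc2a-4c52-9b47-9d5b8e2f0a11')

    headers = profiler.serialize_as_http_headers()
    # 'X-Trace-Info' is the base64-encoded JSON with the trace ids and
    # 'X-Trace-HMAC' is the HMAC-SHA1 signature of that blob.
    assert set(headers) == {'X-Trace-Info', 'X-Trace-HMAC'}

    profiler.disable()
    assert profiler.serialize_as_http_headers() == {}
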
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index e612bd1..f076727 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -27,6 +27,7 @@
from tempest.lib.common import http
from tempest.lib.common import jsonschema_validator
+from tempest.lib.common import profiler
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
@@ -131,8 +132,10 @@
accept_type = 'json'
if send_type is None:
send_type = 'json'
- return {'Content-Type': 'application/%s' % send_type,
- 'Accept': 'application/%s' % accept_type}
+ headers = {'Content-Type': 'application/%s' % send_type,
+ 'Accept': 'application/%s' % accept_type}
+ headers.update(profiler.serialize_as_http_headers())
+ return headers
def __str__(self):
STRING_LIMIT = 80
@@ -282,7 +285,7 @@
def get(self, url, headers=None, extra_headers=False):
"""Send a HTTP GET request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the get request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
@@ -297,7 +300,7 @@
def delete(self, url, headers=None, body=None, extra_headers=False):
"""Send a HTTP DELETE request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the delete request to
:param dict headers: The headers to use for the request
:param dict body: the request body
:param bool extra_headers: Boolean value than indicates if the headers
@@ -313,7 +316,7 @@
def patch(self, url, body, headers=None, extra_headers=False):
"""Send a HTTP PATCH request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the patch request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
@@ -329,7 +332,7 @@
def put(self, url, body, headers=None, extra_headers=False, chunked=False):
"""Send a HTTP PUT request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the put request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
@@ -346,7 +349,7 @@
def head(self, url, headers=None, extra_headers=False):
"""Send a HTTP HEAD request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the head request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
@@ -361,7 +364,7 @@
def copy(self, url, headers=None, extra_headers=False):
"""Send a HTTP COPY request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the copy request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 3483c51..7f94612 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -170,7 +170,7 @@
:rtype: string
"""
return b''.join([six.int2byte(random.randint(0, 255))
- for i in range(size)])
+ for i in range(size)])
# Courtesy of http://stackoverflow.com/a/312464
diff --git a/tempest/lib/common/utils/misc.py b/tempest/lib/common/utils/misc.py
index 2b0fcd5..a0b0c0a 100644
--- a/tempest/lib/common/utils/misc.py
+++ b/tempest/lib/common/utils/misc.py
@@ -12,9 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
def singleton(cls):
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index b399aa0..808e0fb 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -136,10 +136,17 @@
This decorator applies the testtools.testcase.attr if it is in the list of
attributes to testtools we want to apply.
+
+ :param condition: Optional condition which, if true, will apply the attr.
+ If a condition is specified and it is false, the attr will not be applied
+ to the test function. If not specified, the attr is always applied.
"""
def decorator(f):
- if 'type' in kwargs and isinstance(kwargs['type'], str):
+ # Check to see if the attr should be conditionally applied.
+ if 'condition' in kwargs and not kwargs.get('condition'):
+ return f
+ if 'type' in kwargs and isinstance(kwargs['type'], six.string_types):
f = testtools.testcase.attr(kwargs['type'])(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
@@ -147,3 +154,45 @@
return f
return decorator
+
+
+def unstable_test(*args, **kwargs):
+ """A decorator useful to run tests hitting known bugs and skip it if fails
+
+ This decorator can be used in cases like:
+
+ * We have tests skipped because of some bug and now the bug is claimed to
+ be fixed. Now we want to check the test's stability, so we use this
+ decorator. The number of skipped cases with that bug can be counted to
+ mark the test stable again.
+ * There is a test which fails often, but not always. If there is a known
+ bug related to it, and someone is working on a fix, this decorator can be
+ used instead of "skip_because". That ensures the test is still run, so
+ new debug data can be collected from the jobs' logs, but it does not make
+ other developers' lives harder by forcing them to recheck jobs more
+ often.
+
+ ``bug`` must be a number for the test to skip.
+
+ :param bug: bug number causing the test to skip (launchpad or storyboard)
+ :param bug_type: 'launchpad' or 'storyboard', default 'launchpad'
+ :raises: testtools.TestCase.skipException if the test actually fails
+ and ``bug`` is included
+ """
+ def decor(f):
+ @functools.wraps(f)
+ def inner(self, *func_args, **func_kwargs):
+ try:
+ return f(self, *func_args, **func_kwargs)
+ except Exception as e:
+ if "bug" in kwargs:
+ bug = kwargs['bug']
+ bug_type = kwargs.get('bug_type', 'launchpad')
+ bug_url = _get_bug_url(bug, bug_type)
+ msg = ("Marked as unstable and skipped because of bug: "
+ "%s, failure was: %s") % (bug_url, e)
+ raise testtools.TestCase.skipException(msg)
+ else:
+ raise e
+ return inner
+ return decor
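
For reference, a short illustrative sketch of how the two decorator changes are meant to be used; the test class and the bug number below are placeholders:

    import testtools

    from tempest.lib import decorators


    class ServerOpsExample(testtools.TestCase):  # hypothetical example class

        # The attr is applied only when the condition holds; with a false
        # condition the 'slow' tag is simply not added.
        @decorators.attr(type='slow', condition=True)
        @decorators.idempotent_id('11111111-2222-4333-8444-555555555555')
        def test_reboot(self):
            pass

        # If the body raises, the failure is reported as a skip pointing
        # at the (placeholder) bug instead of failing the job outright.
        @decorators.unstable_test(bug='1234567')
        def test_known_flaky(self):
            raise Exception('known intermittent failure')
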
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index 2fad0a4..5d2dd46 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -172,7 +172,7 @@
https://developer.openstack.org/api-ref/compute/#show-an-extra-spec-for-a-flavor
"""
resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
- key))
+ key))
body = json.loads(body)
self.validate_response(
schema_extra_specs.set_get_flavor_extra_specs_key,
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 9eed4b3..18e08cc 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -636,7 +636,7 @@
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
- 'os-virtual-interfaces']))
+ 'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index dbb9acc..d09f20c 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -125,6 +125,27 @@
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
+
+ :param **kwargs:
+ See extra parameters below
+
+ :Keyword Arguments:
+ * *vnic_type* (``string``) --
+ used when launching instances with pre-configured ports.
+ Examples:
+ normal: a traditional virtual port that is either attached
+ to a linux bridge or an openvswitch bridge on a
+ compute node.
+ direct: an SR-IOV port that is directly attached to a VM
+ macvtap: an SR-IOV port that is attached to a VM via a macvtap
+ device.
+ Defaults to ``CONF.network.port_vnic_type``.
+ * *port_profile* (``dict``) --
+ This attribute is a dictionary that can be used (with admin
+ credentials) to supply information influencing the binding of
+ the port.
+ example: port_profile = "capabilities:[switchdev]"
+ Defaults to ``CONF.network.port_profile``.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
@@ -143,8 +164,8 @@
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-server")
- vnic_type = CONF.network.port_vnic_type
- profile = CONF.network.port_profile
+ vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
+ profile = kwargs.pop('port_profile', CONF.network.port_profile)
# If vnic_type or profile are configured create port for
# every network
@@ -166,7 +187,7 @@
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
+ for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
@@ -303,6 +324,25 @@
snapshot['id'])['snapshot']
return snapshot
+ def _cleanup_volume_type(self, volume_type):
+ """Clean up a given volume type.
+
+ Ensure all volumes associated with the type are removed first, before
+ attempting to remove the type itself. This includes any image volume
+ cache volumes stored in a separate tenant from the original volumes
+ created from the type.
+ """
+ admin_volume_type_client = self.os_admin.volume_types_client_latest
+ admin_volumes_client = self.os_admin.volumes_client_latest
+ volumes = admin_volumes_client.list_volumes(
+ detail=True, params={'all_tenants': 1})['volumes']
+ type_name = volume_type['name']
+ for volume in [v for v in volumes if v['volume_type'] == type_name]:
+ test_utils.call_and_ignore_notfound_exc(
+ admin_volumes_client.delete_volume, volume['id'])
+ admin_volumes_client.wait_for_resource_deletion(volume['id'])
+ admin_volume_type_client.delete_volume_type(volume_type['id'])
+
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
client = self.os_admin.volume_types_client_latest
@@ -317,7 +357,7 @@
volume_type = client.create_volume_type(
name=randomized_name, extra_specs=extra_specs)['volume_type']
- self.addCleanup(client.delete_volume_type, volume_type['id'])
+ self.addCleanup(self._cleanup_volume_type, volume_type)
return volume_type
def _create_loginable_secgroup_rule(self, secgroup_id=None):
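
For reference, with this change a scenario test can override the port vnic_type and port_profile per create_server() call instead of relying on the config defaults. An illustrative sketch, where the test class is a placeholder and admin credentials are assumed for the port_profile binding details:

    from tempest.common import utils
    from tempest.scenario import manager


    class TestSriovBasicOps(manager.ScenarioTest):  # hypothetical example

        @utils.services('compute', 'network')
        def test_boot_with_direct_port(self):
            # Override CONF.network.port_vnic_type for this server only:
            # an SR-IOV port is created and handed to the instance.
            self.create_server(
                vnic_type='direct',
                # Binding information for the port; supplying a
                # port_profile requires admin credentials.
                port_profile={'capabilities': ['switchdev']})
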
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2b35e45..cee543b 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -48,6 +48,7 @@
10. Check SSH connection to instance after reboot
"""
+
def nova_show(self, server):
got_server = (self.servers_client.show_server(server['id'])
['server'])
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 4be2b29..37bcd04 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -119,6 +119,7 @@
server, keypair, floating_ip)
@decorators.idempotent_id('7b6860c2-afa3-4846-9522-adeb38dfbe08')
+ @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_server_connectivity_reboot(self):
keypair = self.create_keypair()
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 1671216..02bc692 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+from oslo_serialization import jsonutils as json
from tempest.common import utils
from tempest.common import waiters
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 3bc5b0b..6ed7e30 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -11,7 +11,7 @@
# under the License.
from oslo_log import log as logging
-from oslo_serialization import jsonutils
+from oslo_serialization import jsonutils as json
import testtools
from tempest.common import utils
@@ -86,7 +86,6 @@
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
-
"""This test case attempts to reproduce the following steps:
* Create in Cinder some bootable volume importing a Glance image
@@ -265,7 +264,7 @@
bdms = image.get('block_device_mapping')
if not bdms:
bdms = image['properties']['block_device_mapping']
- bdms = jsonutils.loads(bdms)
+ bdms = json.loads(bdms)
snapshot_id = bdms[0]['snapshot_id']
self._delete_snapshot(snapshot_id)
diff --git a/tempest/test.py b/tempest/test.py
index c3c58dc..85000b6 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -28,6 +28,7 @@
from tempest.common import utils
from tempest import config
from tempest.lib.common import fixed_network
+from tempest.lib.common import profiler
from tempest.lib.common import validation_resources as vr
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -231,6 +232,9 @@
if CONF.pause_teardown:
BaseTestCase.insert_pdb_breakpoint()
+ if CONF.profiler.key:
+ profiler.disable()
+
@classmethod
def insert_pdb_breakpoint(cls):
"""Add pdb breakpoint.
@@ -608,6 +612,8 @@
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
format=self.log_format,
level=None))
+ if CONF.profiler.key:
+ profiler.enable(CONF.profiler.key)
@property
def credentials_provider(self):
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 9c18052..7a037eb 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -179,6 +179,7 @@
This class is used to manage the lifecycle of external tempest test
plugins. It provides functions for getting set
"""
+
def __init__(self):
self.ext_plugins = stevedore.ExtensionManager(
'tempest.test_plugins', invoke_on_load=True,
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 330f370..143c6e1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -37,7 +37,7 @@
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
- top_level_dir=base_path))
+ top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 59e5636..3262b1c 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -104,6 +104,7 @@
cleanup_service.CONF_PROJECTS[0],
"ports": cleanup_service.CONF_PUB_NETWORK,
"routers": cleanup_service.CONF_PUB_ROUTER,
+ "subnetpools": cleanup_service.CONF_PROJECTS[0],
}
saved_state = {
@@ -139,7 +140,8 @@
"metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
"ports": {u'aa74aa4v-741a': u'saved-port'},
"security_groups": {u'7q844add-3697': u'saved-sec-group'},
- "subnets": {u'55ttda4a-2584': u'saved-subnet'}
+ "subnets": {u'55ttda4a-2584': u'saved-subnet'},
+ "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'}
}
# Mocked methods
get_method = 'tempest.lib.common.rest_client.RestClient.get'
@@ -1097,6 +1099,74 @@
self._test_is_preserve_true([(self.get_method, self.response, 200)])
+class TestNetworkSubnetPoolsService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSubnetPoolsService'
+ service_name = 'subnetpools'
+ response = {
+ "subnetpools": [
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "03f761e6-eee0-43fc-a921-8acf64c14988",
+ "max_prefixlen": "64",
+ "name": "my-subnet-pool-ipv6",
+ "is_default": False,
+ "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "prefixes": [
+ "2001:db8:0:2::/64",
+ "2001:db8::/63"
+ ],
+ "ip_version": 6,
+ "shared": False,
+ "description": "",
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41",
+ "revision_number": 2,
+ "tags": ["tag1,tag2"]
+ },
+ {
+ "id": "8acf64c1-43fc",
+ "name": "saved-subnet-pool"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['subnetpools'].append(
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "9acf64c1-43fc",
+ "name": "preserve-pool",
+ "project_id": cleanup_service.CONF_PROJECTS[0],
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41"
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
# begin global services
class TestDomainService(BaseCmdServiceTests):
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 00f8bc5..0e00d94 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -29,6 +29,10 @@
from tempest.lib.common.utils import data_utils
from tempest.tests import base
+if six.PY2:
+ # Python 2 does not have the FileNotFoundError exception
+ FileNotFoundError = IOError
+
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
@@ -244,14 +248,22 @@
# getting set in os environment when some data has passed to
# set the environment.
- self.run_cmd._set_env("/fakedir/fakefile")
- self.assertEqual("/fakedir/fakefile", CONF._path)
- self.assertIn('TEMPEST_CONFIG_DIR', os.environ)
- self.assertEqual("/fakedir/fakefile",
- os.path.join(os.environ['TEMPEST_CONFIG_DIR'],
- os.environ['TEMPEST_CONFIG']))
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
- def test_tempest_run_set_config_no_path(self):
+ self.run_cmd._set_env(path)
+ self.assertEqual(path, CONF._path)
+ self.assertIn('TEMPEST_CONFIG_DIR', os.environ)
+ self.assertEqual(path, os.path.join(os.environ['TEMPEST_CONFIG_DIR'],
+ os.environ['TEMPEST_CONFIG']))
+
+ def test_tempest_run_set_config_no_exist_path(self):
+ path = "fake/path"
+ self.assertRaisesRegex(FileNotFoundError,
+ 'Config file: .* doesn\'t exist',
+ self.run_cmd._set_env, path)
+
+ def test_tempest_run_no_config_path(self):
# Note: (mbindlish) This test is created for the bug id: 1783751
# Checking TEMPEST_CONFIG_DIR and TEMPEST_CONFIG should have no value
# in os environment when no data has passed to set the environment.
@@ -313,13 +325,15 @@
def test_config_file_specified(self):
self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
parsed_args = mock.Mock()
parsed_args.workspace = None
parsed_args.state = None
parsed_args.list_tests = False
- parsed_args.config_file = '.stestr.conf'
+ parsed_args.config_file = path
with mock.patch('stestr.commands.run_command') as m:
m.return_value = 0
@@ -341,13 +355,15 @@
def test_config_file_workspace_registered(self):
self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
parsed_args = mock.Mock()
parsed_args.workspace = self.name
parsed_args.workspace_path = self.store_file
parsed_args.state = None
parsed_args.list_tests = False
- parsed_args.config_file = '.stestr.conf'
+ parsed_args.config_file = path
with mock.patch('stestr.commands.run_command') as m:
m.return_value = 0
@@ -406,13 +422,15 @@
@mock.patch('tempest.cmd.run.TempestRun._init_state')
def test_no_workspace_config_file_state_true(self, mock_init_state):
self._setup_test_dirs()
+ _, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
parsed_args = mock.Mock()
parsed_args.workspace = None
parsed_args.workspace_path = self.store_file
parsed_args.state = True
parsed_args.list_tests = False
- parsed_args.config_file = '.stestr.conf'
+ parsed_args.config_file = path
with mock.patch('stestr.commands.run_command') as m:
m.return_value = 0
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index b4f6c5f..7a6b576 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -48,7 +48,7 @@
stdout, stderr = process.communicate()
return_code = process.returncode
msg = ("%s failed with:\nstdout: %s\nstderr: %s" % (' '.join(cmd),
- stdout, stderr))
+ stdout, stderr))
self.assertEqual(return_code, expected, msg)
def test_run_workspace_list(self):
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index ebcf5d1..4723458 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -109,8 +109,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': id, 'name': name},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '2', 'name': 'Member'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_2_roles(self):
@@ -120,8 +120,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': '1234', 'name': 'role1'},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '12345', 'name': 'role2'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '12345', 'name': 'role2'}]}))))
return roles_fix
def _mock_assign_user_role(self):
diff --git a/tempest/tests/lib/common/test_profiler.py b/tempest/tests/lib/common/test_profiler.py
new file mode 100644
index 0000000..59fa0364
--- /dev/null
+++ b/tempest/tests/lib/common/test_profiler.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import testtools
+
+from tempest.lib.common import profiler
+
+
+class TestProfiler(testtools.TestCase):
+
+ def test_serialize(self):
+ key = 'SECRET_KEY'
+ pm = {'key': key, 'uuid': 'ID'}
+
+ with mock.patch('tempest.lib.common.profiler._profiler', pm):
+ with mock.patch('json.dumps') as jdm:
+ jdm.return_value = '{"base_id": "ID", "parent_id": "ID"}'
+
+ expected = {
+ 'X-Trace-HMAC':
+ '887292df9f13b8b5ecd6bbbd2e16bfaaa4d914b0',
+ 'X-Trace-Info':
+ b'eyJiYXNlX2lkIjogIklEIiwgInBhcmVudF9pZCI6ICJJRCJ9'
+ }
+
+ self.assertEqual(expected,
+ profiler.serialize_as_http_headers())
+
+ def test_profiler_lifecycle(self):
+ key = 'SECRET_KEY'
+ uuid = 'ID'
+
+ self.assertEqual({}, profiler._profiler)
+
+ profiler.enable(key, uuid)
+ self.assertEqual({'key': key, 'uuid': uuid}, profiler._profiler)
+
+ profiler.disable()
+ self.assertEqual({}, profiler._profiler)
+
+ @mock.patch('oslo_utils.uuidutils.generate_uuid')
+ def test_profiler_lifecycle_generate_trace_id(self, generate_uuid_mock):
+ key = 'SECRET_KEY'
+ uuid = 'ID'
+ generate_uuid_mock.return_value = uuid
+
+ self.assertEqual({}, profiler._profiler)
+
+ profiler.enable(key)
+ self.assertEqual({'key': key, 'uuid': uuid}, profiler._profiler)
+
+ profiler.disable()
+ self.assertEqual({}, profiler._profiler)
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index 4c0bb57..b861582 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -13,10 +13,10 @@
# under the License.
import copy
-import json
import fixtures
import jsonschema
+from oslo_serialization import jsonutils as json
import six
from tempest.lib.common import http
diff --git a/tempest/tests/lib/services/compute/test_images_client.py b/tempest/tests/lib/services/compute/test_images_client.py
index c2c3b76..d1500e5 100644
--- a/tempest/tests/lib/services/compute/test_images_client.py
+++ b/tempest/tests/lib/services/compute/test_images_client.py
@@ -186,15 +186,19 @@
def _test_resource_deleted(self, bytes_body=False):
params = {"id": self.FAKE_IMAGE_ID}
expected_op = self.FAKE_IMAGE_DATA['show']
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- side_effect=lib_exc.NotFound))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ side_effect=lib_exc.NotFound))
self.assertEqual(True, self.client.is_resource_deleted(**params))
tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
tempdata['image']['id'] = None
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- return_value=expected_op))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ return_value=expected_op))
self.assertEqual(False, self.client.is_resource_deleted(**params))
def test_list_images_with_str_body(self):
diff --git a/tempest/tests/lib/services/identity/v2/test_token_client.py b/tempest/tests/lib/services/identity/v2/test_token_client.py
index dfce9b3..a592ada 100644
--- a/tempest/tests/lib/services/identity/v2/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v2/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index 38e8c4a..a9c58df 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
index 1da2112..07af68a 100644
--- a/tempest/tests/lib/services/registry_fixture.py
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -37,8 +37,9 @@
def __init__(self):
"""Initialise the registry fixture"""
self.services = set(['compute', 'identity.v2', 'identity.v3',
- 'image.v1', 'image.v2', 'network', 'volume.v1',
- 'volume.v2', 'volume.v3', 'object-storage'])
+ 'image.v1', 'image.v2', 'network', 'placement',
+ 'volume.v1', 'volume.v2', 'volume.v3',
+ 'object-storage'])
def _setUp(self):
# Cleanup the registry
diff --git a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
index e0f5566..84c7589 100644
--- a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
@@ -62,7 +62,7 @@
resp_body = self.FAKE_POOLS_LIST
else:
resp_body = {'pools': [{'name': pool['name']}
- for pool in self.FAKE_POOLS_LIST['pools']]}
+ for pool in self.FAKE_POOLS_LIST['pools']]}
self.check_service_client_function(
self.client.list_pools,
'tempest.lib.common.rest_client.RestClient.get',
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index 0b1a599..9c6cac7 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
+
import mock
+import six
import testtools
from tempest.lib import base as test
@@ -32,9 +35,17 @@
# By our decorators.attr decorator the attribute __testtools_attrs
# will be set only for 'type' argument, so we test it first.
if 'type' in decorator_args:
- # this is what testtools sets
- self.assertEqual(getattr(foo, '__testtools_attrs'),
- set(expected_attrs))
+ if 'condition' in decorator_args:
+ if decorator_args['condition']:
+ # The expected attrs should be in the function.
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
+ else:
+ # The expected attrs should not be in the function.
+ self.assertNotIn('__testtools_attrs', foo)
+ else:
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
def test_attr_without_type(self):
self._test_attr_helper(expected_attrs='baz', bar='baz')
@@ -50,10 +61,44 @@
def test_attr_decorator_with_duplicated_type(self):
self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
+ def test_attr_decorator_condition_false(self):
+ self._test_attr_helper(None, type='slow', condition=False)
-class TestSkipBecauseDecorator(base.TestCase):
- def _test_skip_because_helper(self, expected_to_skip=True,
- **decorator_args):
+ def test_attr_decorator_condition_true(self):
+ self._test_attr_helper(expected_attrs=['slow'], type='slow',
+ condition=True)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseSkipDecoratorTests(object):
+
+ @abc.abstractmethod
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
+ return
+
+ def test_skip_launchpad_bug(self):
+ self._test_skip_helper(bug='12345')
+
+ def test_skip_storyboard_bug(self):
+ self._test_skip_helper(bug='1992', bug_type='storyboard')
+
+ def test_skip_bug_without_bug_never_skips(self):
+ """Never skip without a bug parameter."""
+ self._test_skip_helper(
+ raise_exception=False, expected_to_skip=False, condition=True)
+ self._test_skip_helper(
+ raise_exception=False, expected_to_skip=False)
+
+ def test_skip_invalid_bug_number(self):
+ """Raise InvalidParam if with an invalid bug number"""
+ self.assertRaises(lib_exc.InvalidParam, self._test_skip_helper,
+ bug='critical_bug')
+
+
+class TestSkipBecauseDecorator(base.TestCase, BaseSkipDecoratorTests):
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@@ -75,38 +120,56 @@
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
- def test_skip_because_launchpad_bug(self):
- self._test_skip_because_helper(bug='12345')
-
def test_skip_because_launchpad_bug_and_condition_true(self):
- self._test_skip_because_helper(bug='12348', condition=True)
+ self._test_skip_helper(bug='12348', condition=True)
def test_skip_because_launchpad_bug_and_condition_false(self):
- self._test_skip_because_helper(expected_to_skip=False,
- bug='12349', condition=False)
-
- def test_skip_because_storyboard_bug(self):
- self._test_skip_because_helper(bug='1992', bug_type='storyboard')
-
- def test_skip_because_storyboard_bug_and_condition_true(self):
- self._test_skip_because_helper(bug='1992', bug_type='storyboard',
- condition=True)
+ self._test_skip_helper(expected_to_skip=False,
+ bug='12349', condition=False)
def test_skip_because_storyboard_bug_and_condition_false(self):
- self._test_skip_because_helper(expected_to_skip=False,
- bug='1992', bug_type='storyboard',
- condition=False)
+ self._test_skip_helper(expected_to_skip=False,
+ bug='1992', bug_type='storyboard',
+ condition=False)
- def test_skip_because_bug_without_bug_never_skips(self):
- """Never skip without a bug parameter."""
- self._test_skip_because_helper(expected_to_skip=False,
- condition=True)
- self._test_skip_because_helper(expected_to_skip=False)
+ def test_skip_because_storyboard_bug_and_condition_true(self):
+ self._test_skip_helper(bug='1992', bug_type='storyboard',
+ condition=True)
- def test_skip_because_invalid_bug_number(self):
- """Raise InvalidParam if with an invalid bug number"""
- self.assertRaises(lib_exc.InvalidParam, self._test_skip_because_helper,
- bug='critical_bug')
+
+class TestUnstableTestDecorator(base.TestCase, BaseSkipDecoratorTests):
+
+ def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
+ **decorator_args):
+ fail_test_reason = "test_bar failed"
+
+ class TestFoo(test.BaseTestCase):
+
+ @decorators.unstable_test(**decorator_args)
+ def test_bar(self):
+ if raise_exception:
+ raise Exception(fail_test_reason)
+ else:
+ return 0
+
+ t = TestFoo('test_bar')
+ if expected_to_skip:
+ e = self.assertRaises(testtools.TestCase.skipException, t.test_bar)
+ bug = decorator_args['bug']
+ bug_type = decorator_args.get('bug_type', 'launchpad')
+ self.assertRegex(
+ str(e),
+ r'Marked as unstable and skipped because of bug\: %s.*, '
+ 'failure was: %s' % (decorators._get_bug_url(bug, bug_type),
+ fail_test_reason)
+ )
+ else:
+ # assert that test_bar returned 0
+ self.assertEqual(TestFoo('test_bar').test_bar(), 0)
+
+ def test_skip_bug_given_exception_not_raised(self):
+ self._test_skip_helper(raise_exception=False, expected_to_skip=False,
+ bug='1234')
class TestIdempotentIdDecorator(base.TestCase):
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 9534ce8..83c1abb 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -48,6 +48,7 @@
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
+
def test_no_setup_teardown_class_for_tests(self):
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
diff --git a/tools/format.sh b/tools/format.sh
new file mode 100755
index 0000000..dec8f1c
--- /dev/null
+++ b/tools/format.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+cd $(dirname "$(readlink -f "$0")")
+
+AUTOPEP8=`which autopep8 2>/dev/null`
+
+if [[ -z "$AUTOPEP8" ]]; then
+ AUTOPEP8=`which autopep8-3`
+fi
+
+if [[ -z "$AUTOPEP8" ]]; then
+ echo "Unable to locate autopep8" >&2
+ exit 2
+fi
+
+# isort is not compatible with the default flake8 (H306), maybe flake8-isort
+# isort -rc -sl -fss ../tempest ../setup.py
+$AUTOPEP8 --exit-code --max-line-length=79 --experimental --in-place -r ../tempest ../setup.py
+ERROR=$?
+
+if [[ $ERROR -eq 0 ]]; then
+ echo "Formatting was not needed." >&2
+ exit 0
+elif [[ $ERROR -eq 1 ]]; then
+ echo "Formatting failed.." >&2
+ exit 1
+else
+ echo "done" >&2
+fi
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index 111c9ce..17a4059 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -69,7 +69,7 @@
i=0
for plugin in ${sorted_plugins}; do
i=$((i+1))
- giturl="git://git.openstack.org/openstack/${plugin}"
+ giturl="https://git.openstack.org/openstack/${plugin}"
gitlink="https://git.openstack.org/cgit/openstack/${plugin}"
printf "%-3s %-${name_col_len}s %s\n" "$i" "${plugin}" "\`${giturl} <${gitlink}>\`__"
done
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 661329b..703dce2 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -18,20 +18,17 @@
# This script is intended to check the sanity of tempest plugins against
# tempest master.
# What it does:
-# * Creates the virtualenv
-# * Install tempest
# * Retrieve the project lists having tempest plugin if project name is
# given.
-# * For each project in a list, It does:
+# * For each project in a list, it does:
+# * Create virtualenv and install tempest in it
# * Clone the Project
# * Install the Project and also installs dependencies from
# test-requirements.txt.
# * Create Tempest workspace
# * List tempest plugins
# * List tempest plugins tests
-# * Uninstall the project and its dependencies
-# * Again Install tempest
-# * Again repeat the step from cloning project
+# * Delete virtualenv and project repo
#
# If one of the steps fails, the script will exit with failure.
@@ -46,31 +43,64 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
-# List of projects having tempest plugin stale or unmaintained from long time
-BLACKLIST="networking-plumgrid,trio2o"
+# List of projects whose tempest plugin has been stale or unmaintained for a long time
+# (6 months or more)
+# TODO(masayukig): Some of these can be removed from BLACKLIST in the future.
+# airship-tempest-plugin: https://review.openstack.org/#/c/634387/
+# barbican-tempest-plugin: https://review.openstack.org/#/c/634631/
+# intel-nfv-ci-tests: https://review.openstack.org/#/c/634640/
+# networking-ansible: https://review.openstack.org/#/c/634647/
+# networking-generic-switch: https://review.openstack.org/#/c/634846/
+# networking-l2gw-tempest-plugin: https://review.openstack.org/#/c/635093/
+# networking-midonet: https://review.openstack.org/#/c/635096/
+# networking-plumgrid: https://review.openstack.org/#/c/635096/
+# networking-spp: https://review.openstack.org/#/c/635098/
+# neutron-dynamic-routing: https://review.openstack.org/#/c/637718/
+# neutron-vpnaas: https://review.openstack.org/#/c/637719/
+# nova-lxd: https://review.openstack.org/#/c/638334/
+# valet: https://review.openstack.org/#/c/638339/
+# vitrage-tempest-plugin: https://review.openstack.org/#/c/639003/
+BLACKLIST="
+airship-tempest-plugin
+barbican-tempest-plugin
+intel-nfv-ci-tests
+networking-ansible
+networking-generic-switch
+networking-l2gw-tempest-plugin
+networking-midonet
+networking-plumgrid
+networking-spp
+neutron-dynamic-routing
+neutron-vpnaas
+nova-lxd
+valet
+vitrage-tempest-plugin
+"
# Function to clone project using zuul-cloner or from git
function clone_project() {
if [ -e /usr/zuul-env/bin/zuul-cloner ]; then
/usr/zuul-env/bin/zuul-cloner --cache-dir /opt/git \
- git://git.openstack.org \
+ https://git.openstack.org \
openstack/"$1"
elif [ -e /usr/bin/git ]; then
- /usr/bin/git clone git://git.openstack.org/openstack/"$1" \
+ /usr/bin/git clone https://git.openstack.org/openstack/"$1" \
openstack/"$1"
fi
}
-# Create virtualenv to perform sanity operation
-SANITY_DIR=$(pwd)
-virtualenv "$SANITY_DIR"/.venv
-export TVENV="$SANITY_DIR/tools/with_venv.sh"
-cd "$SANITY_DIR"
+# function to create virtualenv to perform sanity operation
+function prepare_workspace() {
+ SANITY_DIR=$(pwd)
+ virtualenv --clear "$SANITY_DIR"/.venv
+ export TVENV="$SANITY_DIR/tools/with_venv.sh"
+ cd "$SANITY_DIR"
-# Install tempest in a venv
-"$TVENV" pip install .
+ # Install tempest with test dependencies in a venv
+ "$TVENV" pip install -e . -r test-requirements.txt
+}
# Function to install project
function install_project() {
@@ -83,30 +113,31 @@
# Function to perform sanity checking on Tempest plugin
function tempest_sanity() {
- "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity
- cd "$SANITY_DIR"/tempest_sanity
- "$TVENV" tempest list-plugins
+ "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity && \
+ cd "$SANITY_DIR"/tempest_sanity && \
+ "$TVENV" tempest list-plugins && \
"$TVENV" tempest run -l
+ retval=$?
# Delete tempest workspace
+ # NOTE: Cleaning should be done even if an error occurs.
"$TVENV" tempest workspace remove --name tempest_sanity --rmdir
cd "$SANITY_DIR"
-}
-
-# Function to uninstall project
-function uninstall_project() {
- "$TVENV" pip uninstall -y "$SANITY_DIR"/openstack/"$1"
- # Check for *requirements.txt file in a project then uninstall it.
- if [ -e "$SANITY_DIR"/openstack/"$1"/*requirements.txt ]; then
- "$TVENV" pip uninstall -y -r "$SANITY_DIR"/openstack/"$1"/*requirements.txt
- fi
+ # Remove the sanity workspace in case it is left behind
+ rm -fr "$SANITY_DIR"/tempest_sanity
# Remove the project directory after sanity run
rm -fr "$SANITY_DIR"/openstack/"$1"
+
+ return $retval
}
# Function to run sanity check on each project
function plugin_sanity_check() {
- clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
- && uninstall_project "$1" && "$TVENV" pip install .
+ prepare_workspace && \
+ clone_project "$1" && \
+ install_project "$1" && \
+ tempest_sanity "$1"
+
+ return $?
}
# Log status
@@ -117,11 +148,12 @@
# Remove blacklisted tempest plugins
if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
plugin_sanity_check $project && passed_plugin+=", $project" || \
- failed_plugin+=", $project"
+ failed_plugin+="$project, " > $SANITY_DIR/$project.txt
fi
done
# Check for failed status
if [[ -n $failed_plugin ]]; then
+ echo "Failed Plugins: $failed_plugin"
exit 1
fi
diff --git a/tox.ini b/tox.ini
index 4068054..230249f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = pep8,py36,py27,pip-check-reqs
+envlist = pep8,py36,py37,py27,pip-check-reqs
minversion = 2.3.1
skipsdist = True
@@ -197,11 +197,21 @@
whitelist_externals = rm
[testenv:pep8]
+deps =
+ -r{toxinidir}/test-requirements.txt
+ autopep8
basepython = python3
commands =
+ autopep8 --exit-code --max-line-length=79 --experimental --diff -r tempest setup.py
flake8 {posargs}
check-uuid
+[testenv:autopep8]
+deps = autopep8
+basepython = python3
+commands =
+ {toxinidir}/tools/format.sh
+
[testenv:uuidgen]
commands =
check-uuid --fix