Merge "allow create_server with vnic_type and port_profile from kwargs"
diff --git a/.zuul.yaml b/.zuul.yaml
index ef9b0eb..7b8bcfa 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -99,6 +99,7 @@
tox_envlist: full
devstack_localrc:
ENABLE_FILE_INJECTION: true
+ ENABLE_VOLUME_MULTIATTACH: true
- job:
name: tempest-full-oslo-master
@@ -161,6 +162,7 @@
devstack_localrc:
USE_PYTHON3: true
FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
devstack_services:
s-account: false
s-container: false
@@ -192,12 +194,8 @@
c-bak: false
- job:
- name: tempest-multinode-full
+ name: tempest-multinode-full-base
parent: devstack-tempest
- nodeset: openstack-two-node-bionic
- # Until the devstack changes are backported, only run this on master
- branches:
- - master
description: |
Base multinode integration test with Neutron networking and py27.
Former names for this job were:
@@ -222,6 +220,33 @@
LIVE_MIGRATION_AVAILABLE: true
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic from stable/stein on.
+ branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+ # This job runs on Xenial for stable/pike, stable/queens and
+ # stable/rocky. It ensures that all stable branches before stable/stein
+ # keep running on Xenial. This job can be removed once stable/rocky
+ # is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+
+- job:
+ name: tempest-multinode-full-py3
+ parent: tempest-multinode-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+
- nodeset:
name: openstack-bionic-node
nodes:
@@ -254,8 +279,6 @@
- job:
name: tempest-slow
parent: tempest-multinode-full
- branches:
- - master
description: |
This multinode integration job will run all the tests tagged as slow.
It enables the lvm multibackend setup to cover few scenario tests.
@@ -269,7 +292,29 @@
tox_envlist: slow-serial
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
- job:
name: tempest-full-rocky
@@ -306,7 +351,7 @@
parent: tox
description: |
Run tempest plugin sanity check script using tox.
- nodeset: ubuntu-xenial
+ nodeset: ubuntu-bionic
vars:
tox_envlist: plugin-sanity-check
voting: false
@@ -381,10 +426,12 @@
- git.openstack.org/openstack/sahara-tests
- git.openstack.org/openstack/senlin
- git.openstack.org/openstack/senlin-tempest-plugin
+ - git.openstack.org/openstack/solum-tempest-plugin
- git.openstack.org/openstack/tap-as-a-service
- git.openstack.org/openstack/telemetry-tempest-plugin
- git.openstack.org/openstack/tempest-horizon
- git.openstack.org/openstack/tobiko
+ - git.openstack.org/openstack/trio2o
- git.openstack.org/openstack/tripleo-common-tempest-plugin
- git.openstack.org/openstack/trove-tempest-plugin
- git.openstack.org/openstack/valet
@@ -454,6 +501,7 @@
- openstack-python-jobs
- openstack-python35-jobs
- openstack-python36-jobs
+ - openstack-python37-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
@@ -469,7 +517,7 @@
- ^playbooks/
- ^roles/
- ^.zuul.yaml$
- - nova-multiattach:
+ - tempest-full-parallel:
# Define list of irrelevant files to use everywhere else
irrelevant-files: &tempest-irrelevant-files
- ^(test-|)requirements.txt$
@@ -481,8 +529,6 @@
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- ^tools/.*$
- - tempest-full-parallel:
- irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3-ipv6:
@@ -500,6 +546,8 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full:
irrelevant-files: *tempest-irrelevant-files
+ - tempest-multinode-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
- tempest-tox-plugin-sanity-check:
irrelevant-files:
- ^(test-|)requirements.txt$
@@ -513,6 +561,8 @@
# tools/ is not here since this relies on a script in tools/.
- tempest-slow:
irrelevant-files: *tempest-irrelevant-files
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
voting: false
irrelevant-files: *tempest-irrelevant-files
@@ -544,7 +594,6 @@
- tempest-full:
irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
- voting: false
irrelevant-files: *tempest-irrelevant-files
- tempest-full-test-account-py3:
voting: false
@@ -554,9 +603,7 @@
irrelevant-files: *tempest-irrelevant-files
gate:
jobs:
- - nova-multiattach:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-slow:
+ - tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 983fa24..4b1c145 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -338,8 +338,8 @@
.. _2.26: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id23
-* `2.28`_
-
+ * `2.28`_
+
.. _2.28: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id25
* `2.32`_
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index e249bdd..ff4fa09 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -128,8 +128,9 @@
people to respond to removal proposals please add things to the agenda by the
Monday before the meeting.
-The other option is to raise the removal on the openstack-dev mailing list.
-(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html )
+The other option is to raise the removal on the openstack-discuss mailing list.
+(for example see: http://lists.openstack.org/pipermail/openstack-dev/2016-February/086218.html
+or http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003574.html )
This will raise the issue to the wider community and attract at least the same
(most likely more) attention than discussing it during the irc meeting. The
only downside is that it might take more time to get a response, given the
diff --git a/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
new file mode 100644
index 0000000..21b74a6
--- /dev/null
+++ b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ Add a basic read-only Placement client to Tempest to make it possible to
+ test the placement-based bandwidth allocation feature.
+ The following API calls are now available for Tempest:
+
+ * GET /allocation_candidates
+ * GET /allocations/{consumer_uuid}
+
+ Add a new config group ``placement`` with the following config options:
+
+ * ``endpoint_type`` to use for communication with placement service.
+ * ``catalog_type`` of the placement service.
+ * ``region`` as the placement region name to use.
+ * ``min_microversion`` and ``max_microversion`` defining the microversion
+ range within which placement API requests are sent.
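A minimal sketch of how a test might consume the new ``[placement]`` options (the helper function and the header pinning shown here are assumptions for illustration; only the option names come from this note):

    # Sketch: reading the new [placement] config options via Tempest's CONF.
    from tempest import config

    CONF = config.CONF


    def placement_microversion_headers():
        # Placement selects a microversion via the OpenStack-API-Version
        # header; pin it to the configured minimum when one is set.
        headers = {}
        if CONF.placement.min_microversion:
            headers['OpenStack-API-Version'] = (
                'placement %s' % CONF.placement.min_microversion)
        return headers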
diff --git a/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
new file mode 100644
index 0000000..c707f14
--- /dev/null
+++ b/releasenotes/notes/conditional-attr-a8564ec5a70ec840.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The ``tempest.lib.decorators.attr`` decorator now supports a ``condition``
+ kwarg which can be used to conditionally apply the attr to the test
+ function if the condition evaluates to True.
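For example (a sketch assuming a typical compute test class; the same pattern is applied to ``test_attach_detach_volume`` later in this change):

    # The 'slow' attr is only applied when SSH validation is enabled, so the
    # test stays in the default selection for non-validating jobs.
    from tempest.api.compute import base
    from tempest import config
    from tempest.lib import decorators

    CONF = config.CONF


    class ExampleAttachTest(base.BaseV2ComputeTest):

        @decorators.attr(type='slow',
                         condition=CONF.validation.run_validation)
        def test_attach_detach_volume(self):
            pass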
diff --git a/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml b/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml
new file mode 100644
index 0000000..7510d47
--- /dev/null
+++ b/releasenotes/notes/correct-port-profile-config-option-d67f5cb31f1bc34c.yaml
@@ -0,0 +1,17 @@
+---
+fixes:
+ - |
+ Patch https://review.openstack.org/#/c/499575/ introduced
+ support for creating a Neutron port with certain capabilities.
+ Previously the capabilities list was interpreted as a string;
+ this change fixes it.
+
+ tempest.conf
+ [network]
+ port_profile = capabilities:[switchdev]
+
+ result:
+ {'capabilities':'[switchdev]'}
+
+ expected:
+ {'capabilities': ['switchdev']}
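With the fix, the parsed option is a real dict of lists, so it can be handed to Neutron unchanged. A hedged sketch of the intended consumption (the helper below is illustrative, not code from this change):

    # Sketch: building port-creation kwargs from the corrected option.
    from tempest import config

    CONF = config.CONF


    def port_binding_kwargs():
        kwargs = {}
        if CONF.network.port_vnic_type:
            kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
        if CONF.network.port_profile:
            # e.g. {'capabilities': ['switchdev']} after this fix
            kwargs['binding:profile'] = CONF.network.port_profile
        return kwargs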
diff --git a/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml
new file mode 100644
index 0000000..0959b22
--- /dev/null
+++ b/releasenotes/notes/enable-volume-multiattach-fd5e9bf0e96b56ce.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+ The ``tempest-full``, ``tempest-full-py3`` and ``tempest-slow`` zuul v3
+ job configurations now set ``ENABLE_VOLUME_MULTIATTACH: true`` in the
+ ``devstack_localrc`` variables section. If you have a plugin job
+ configuration that inherits from one of these jobs and the backend cinder
+ volume driver or nova compute driver does not support volume multiattach,
+ you should override this variable and set
+ ``ENABLE_VOLUME_MULTIATTACH: false`` in your job configuration.
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index 24ea8a1..40ed532 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -56,7 +56,7 @@
self.assertEqual(first_keyname, keypair_detail['name'])
self.assertEqual(user_id, keypair_detail['user_id'],
"The fetched keypair is not for requested user!")
- # Create a admin keypair
+ # Create an admin keypair
admin_keypair = self.create_keypair(keypair_type='ssh',
client=self.client)
admin_keypair.pop('private_key', None)
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 5cd98f4..bebc8c5 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -105,7 +105,7 @@
asserts the servers are in the group and on different hosts.
"""
hosts = self._create_servers_with_group('anti-affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertNotEqual(hostnames[0], hostnames[1],
'Servers are on the same host: %s' % hosts)
@@ -120,6 +120,6 @@
asserts the servers are in the group and on same host.
"""
hosts = self._create_servers_with_group('affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertEqual(hostnames[0], hostnames[1],
'Servers are on the different hosts: %s' % hosts)
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 6b58939..cc83c04 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -148,6 +148,13 @@
# so it's marked as such.
@decorators.attr(type='slow')
@decorators.idempotent_id('e8f8f9d1-d7b7-4cd2-8213-ab85ef697b6e')
+ # For some reason this test intermittently fails on teardown when there are
+ # multiple compute nodes and the servers are split across the computes.
+ # For now, just skip this test if there are multiple computes.
+ # Alternatively we could put the servers in an affinity group if there are
+ # multiple computes but that would just side-step the underlying bug.
+ @decorators.skip_because(bug='1807723',
+ condition=CONF.compute.min_compute_nodes > 1)
@utils.services('volume')
def test_volume_swap_with_multiattach(self):
# Create two volumes.
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 09dd409..624a99e 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -457,7 +457,7 @@
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
- raise exceptions.InvalidParam(invalid_param=msg)
+ raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 05c2a28..bea23d9 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -256,8 +256,8 @@
if not (CONF.auth.use_dynamic_credentials and
CONF.auth.create_isolated_networks and
not CONF.network.shared_physical_network):
- raise self.skipException("Only owner network supports "
- "creating interface by fixed ip.")
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index d40f937..e817587 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from oslo_log import log as logging
+from oslo_serialization import jsonutils as json
from tempest.api.compute import base
from tempest.common import utils
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 5801db1..daf6a06 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -143,7 +143,7 @@
data_length = len(data) if data is not None else 0
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24),
+ data[20:24])[0] + 24),
'Server initialization was not the right format.')
# Since the rest of the data on the screen is arbitrary, we will
# close the socket and end our validation of the data at this point
@@ -151,7 +151,7 @@
# initialization was the right format
self.assertFalse(data_length <= 24 or
data_length != (struct.unpack(">L",
- data[20:24])[0] + 24))
+ data[20:24])[0] + 24))
def _validate_websocket_upgrade(self):
self.assertTrue(
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index f7b5b4b..8bb4eaa 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -65,6 +65,8 @@
class AttachVolumeTestJSON(BaseAttachVolumeTest):
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
+ # This test is conditionally marked slow if SSH validation is enabled.
+ @decorators.attr(type='slow', condition=CONF.validation.run_validation)
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index a79cbc3..73fddb7 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -21,7 +21,7 @@
CONF = config.CONF
-class TestDefaultProjectId (base.BaseIdentityV3AdminTest):
+class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
@classmethod
def setup_credentials(cls):
@@ -57,9 +57,10 @@
# create a user in the domain, with the previous project as his
# default project
user_name = data_utils.rand_name('user')
+ user_pass = data_utils.rand_password()
user_body = self.users_client.create_user(
name=user_name,
- password=user_name,
+ password=user_pass,
domain_id=dom_id,
default_project_id=proj_id)['user']
user_id = user_body['id']
@@ -78,7 +79,7 @@
# create a new client with user's credentials (NOTE: unscoped token!)
creds = auth.KeystoneV3Credentials(username=user_name,
- password=user_name,
+ password=user_pass,
user_domain_name=dom_name)
auth_provider = clients.get_auth_provider(creds)
creds = auth_provider.fill_credentials()
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index c4e0622..c0b18ca 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -21,6 +21,10 @@
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
custom_config = {
"identity": {
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 72b6be4..07175f4 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -153,18 +153,3 @@
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
self.assertDictContainsSubset(expected_data, domain)
-
-
-class DefaultDomainTestJSON(base.BaseIdentityV3AdminTest):
-
- @classmethod
- def resource_setup(cls):
- cls.domain_id = CONF.identity.default_domain_id
- super(DefaultDomainTestJSON, cls).resource_setup()
-
- @decorators.attr(type='smoke')
- @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
- def test_default_domain_exists(self):
- domain = self.domains_client.show_domain(self.domain_id)['domain']
-
- self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index 56f7d32..b3c68fb 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -20,6 +20,10 @@
class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.attr(type=['negative', 'gate'])
@decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 37ce266..df0d79d 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
from tempest import config
@@ -22,6 +23,10 @@
class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -68,6 +73,10 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_group_users_add_list_delete(self):
group = self.setup_test_group(domain_id=self.domain['id'])
# add user into group
@@ -90,6 +99,10 @@
self.assertEqual(len(group_users), 0)
@decorators.idempotent_id('64573281-d26a-4a52-b899-503cb0f4e4ec')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_list_user_groups(self):
# create a user
user = self.create_test_user()
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 68c0225..ef6aff0 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -9,12 +9,16 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
from tempest.common import utils
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class InheritsV3TestJSON(base.BaseIdentityV3AdminTest):
@@ -30,7 +34,7 @@
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_name('pass-')
+ u_password = data_utils.rand_password()
cls.domain = cls.create_domain()
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project-'),
@@ -43,18 +47,26 @@
domain_id=cls.domain['id'])['group']
cls.addClassResourceCleanup(cls.groups_client.delete_group,
cls.group['id'])
- cls.user = cls.users_client.create_user(
- name=u_name, description=u_desc, password=u_password,
- email=u_email, project_id=cls.project['id'],
- domain_id=cls.domain['id'])['user']
- cls.addClassResourceCleanup(cls.users_client.delete_user,
- cls.user['id'])
+ if not CONF.identity_feature_enabled.immutable_user_source:
+ cls.user = cls.users_client.create_user(
+ name=u_name,
+ description=u_desc,
+ password=u_password,
+ email=u_email,
+ project_id=cls.project['id'],
+ domain_id=cls.domain['id']
+ )['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
@decorators.idempotent_id('4e6f0366-97c8-423c-b2be-41eae6ac91c8')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
# Create role
src_role = self.setup_test_role()
@@ -103,6 +115,9 @@
self.domain['id'], self.group['id'], src_role['id'])
@decorators.idempotent_id('18b70e45-7687-4b72-8277-b8f1a47d7591')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_check_revoke_roles_on_projects_user(self):
# Create role
src_role = self.setup_test_role()
@@ -134,6 +149,9 @@
self.project['id'], self.group['id'], src_role['id']))
@decorators.idempotent_id('3acf666e-5354-42ac-8e17-8b68893bcd36')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_domain(self):
# Create role
src_role = self.setup_test_role()
@@ -178,6 +196,9 @@
self.assertEmpty(assignments)
@decorators.idempotent_id('9f02ccd9-9b57-46b4-8f77-dd5a736f3a06')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
def test_inherit_assign_list_revoke_user_roles_on_project_tree(self):
# Create role
src_role = self.setup_test_role()
diff --git a/tempest/api/identity/admin/v3/test_project_tags.py b/tempest/api/identity/admin/v3/test_project_tags.py
index d05173b..b7878a8 100644
--- a/tempest/api/identity/admin/v3/test_project_tags.py
+++ b/tempest/api/identity/admin/v3/test_project_tags.py
@@ -25,6 +25,10 @@
class IdentityV3ProjectTagsTest(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.idempotent_id('7c123aac-999d-416a-a0fb-84b915ab10de')
@testtools.skipUnless(CONF.identity_feature_enabled.project_tags,
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index f75edaa..0b85b19 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -12,13 +12,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
def test_project_create_with_description(self):
@@ -176,6 +184,10 @@
self.assertEqual(resp2_en, resp3_en)
@decorators.idempotent_id('59398d4a-5dc5-4f86-9a4c-c26cc804d6c6')
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
def test_associate_user_to_project(self):
# Associate a user to a project
# Create a Project
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index f22a528..c8c0151 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -20,6 +20,10 @@
class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 8ae43d6..5f1b58d 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -42,10 +42,10 @@
user = self.create_test_user(password=user_password)
# Create a couple projects
- project1_name = data_utils.rand_name(name='project')
+ project1_name = data_utils.rand_name(name=self.__class__.__name__)
project1 = self.setup_test_project(name=project1_name)
- project2_name = data_utils.rand_name(name='project')
+ project2_name = data_utils.rand_name(name=self.__class__.__name__)
project2 = self.setup_test_project(name=project2_name)
self.addCleanup(self.projects_client.delete_project, project2['id'])
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 83b3c30..54a5ab7 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -49,7 +49,8 @@
def create_trustor_and_roles(self):
# create a project that trusts will be granted on
- trustor_project_name = data_utils.rand_name(name='project')
+ trustor_project_name = data_utils.rand_name(
+ name=self.__class__.__name__)
project = self.projects_client.create_project(
trustor_project_name,
domain_id=CONF.identity.default_domain_id)['project']
diff --git a/tempest/api/identity/v3/test_domains.py b/tempest/api/identity/v3/test_domains.py
new file mode 100644
index 0000000..9f132dd
--- /dev/null
+++ b/tempest/api/identity/v3/test_domains.py
@@ -0,0 +1,39 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class DefaultDomainTestJSON(base.BaseIdentityV3Test):
+
+ @classmethod
+ def setup_clients(cls):
+ super(DefaultDomainTestJSON, cls).setup_clients()
+ cls.domains_client = cls.os_primary.domains_client
+
+ @classmethod
+ def resource_setup(cls):
+ super(DefaultDomainTestJSON, cls).resource_setup()
+ cls.domain_id = CONF.identity.default_domain_id
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
+ def test_default_domain_exists(self):
+ domain = self.domains_client.show_domain(self.domain_id)['domain']
+ self.assertTrue(domain['enabled'])
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
deleted file mode 100644
index 033bf55..0000000
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.network import base
-from tempest.common import utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-AGENT_TYPE = 'L3 agent'
-AGENT_MODES = (
- 'legacy',
- 'dvr_snat'
-)
-
-
-class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
- """Tests the following operations in the Neutron API:
-
- List routers that the given L3 agent is hosting.
- List L3 agents hosting the given router.
- Add and Remove Router to L3 agent
-
- v2.0 of the Neutron API is assumed.
-
- The l3_agent_scheduler extension is required for these tests.
- """
-
- @classmethod
- def skip_checks(cls):
- super(L3AgentSchedulerTestJSON, cls).skip_checks()
- if not utils.is_extension_enabled('l3_agent_scheduler', 'network'):
- msg = "L3 Agent Scheduler Extension not enabled."
- raise cls.skipException(msg)
-
- @classmethod
- def resource_setup(cls):
- super(L3AgentSchedulerTestJSON, cls).resource_setup()
- agents = cls.admin_agents_client.list_agents(
- agent_type=AGENT_TYPE)['agents']
- for agent in agents:
- if (agent['configurations']['agent_mode'] in AGENT_MODES and
- agent['alive']):
- cls.agent = agent
- break
- else:
- msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
- raise exceptions.InvalidConfiguration(msg)
- cls.router = cls.create_router()
-
- @decorators.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
- def test_list_routers_on_l3_agent(self):
- self.admin_agents_client.list_routers_on_l3_agent(self.agent['id'])
-
- @decorators.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
- def test_add_list_remove_router_on_l3_agent(self):
- l3_agent_ids = list()
- self.admin_agents_client.create_router_on_l3_agent(
- self.agent['id'],
- router_id=self.router['id'])
- body = (
- self.admin_routers_client.list_l3_agents_hosting_router(
- self.router['id']))
- for agent in body['agents']:
- l3_agent_ids.append(agent['id'])
- self.assertIn('agent_type', agent)
- self.assertEqual('L3 agent', agent['agent_type'])
- self.assertIn(self.agent['id'], l3_agent_ids)
- body = self.admin_agents_client.delete_router_from_l3_agent(
- self.agent['id'],
- self.router['id'])
- # NOTE(afazekas): The deletion not asserted, because neutron
- # is not forbidden to reschedule the router to the same agent
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index a075b51..9d1e2a7 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -30,7 +30,7 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
@classmethod
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index b1e4a58..ef5ebb6 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -35,7 +35,7 @@
It is also assumed that the per-project quota extension API is configured
in /etc/neutron/neutron.conf as follows:
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
"""
@classmethod
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 3ab2909..eb31ed3 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -206,7 +206,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self.ports_client.delete_port(port['id'])
self.ports.pop()
body = self.ports_client.list_ports()
@@ -257,7 +257,7 @@
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
- subnet_slaac]]
+ subnet_slaac]]
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7345fd1..ed8eb52 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -317,7 +317,7 @@
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
- 'dns_nameservers',
+ 'dns_nameservers',
'allocation_pools']))
subnet_id = subnet['id']
new_gateway = str(netaddr.IPAddress(
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 2c9159c..25976ce 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -107,7 +107,7 @@
address = self.cidr
address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
- (address.version == 6 and address.prefixlen >= 126)):
+ (address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 25e91aa..7ea0099 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -291,13 +291,16 @@
def main(opts=None):
- setup_logging()
+ log_warning = False
if not opts:
- LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
- "please use: 'tempest account-generator'")
+ log_warning = True
opts = get_options()
if opts.config_file:
config.CONF.set_config_path(opts.config_file)
+ setup_logging()
+ if log_warning:
+ LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
+ "please use: 'tempest account-generator'")
if opts.os_tenant_name:
LOG.warning("'os-tenant-name' and 'OS_TENANT_NAME' are both "
"deprecated, please use 'os-project-name' or "
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 2f54f9a..e6db2e9 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -197,7 +197,7 @@
**kwargs))
kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
- 'saved_state_json': None,
+ 'saved_state_json': self.json_data,
'is_preserve': is_preserve,
'is_save_state': False,
'project_id': project_id}
@@ -305,13 +305,17 @@
svc = service(admin_mgr, **kwargs)
svc.run()
- with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ for service in self.project_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
- def _load_json(self):
+ with open(SAVED_STATE_JSON, 'w+') as f:
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
+
+ def _load_json(self, saved_state_json=SAVED_STATE_JSON):
try:
- with open(SAVED_STATE_JSON) as json_file:
+ with open(saved_state_json, 'rb') as json_file:
self.json_data = json.load(json_file)
except IOError as ex:
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1a08246..3aed4e8 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -101,7 +101,7 @@
self.tenant_filter = {}
if hasattr(self, 'tenant_id'):
- self.tenant_filter['tenant_id'] = self.tenant_id
+ self.tenant_filter['project_id'] = self.tenant_id
def _filter_by_tenant_id(self, item_list):
if (item_list is None or
@@ -144,6 +144,10 @@
def list(self):
client = self.client
snaps = client.list_snapshots()['snapshots']
+ if not self.is_save_state:
+ # recreate list removing saved snapshots
+ snaps = [snap for snap in snaps if snap['id']
+ not in self.saved_state_json['snapshots'].keys()]
LOG.debug("List count, %s Snapshots", len(snaps))
return snaps
@@ -160,6 +164,12 @@
snaps = self.list()
self.data['snapshots'] = snaps
+ def save_state(self):
+ snaps = self.list()
+ self.data['snapshots'] = {}
+ for snap in snaps:
+ self.data['snapshots'][snap['id']] = snap['name']
+
class ServerService(BaseService):
def __init__(self, manager, **kwargs):
@@ -171,6 +181,10 @@
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
+ if not self.is_save_state:
+ # recreate list removing saved servers
+ servers = [server for server in servers if server['id']
+ not in self.saved_state_json['servers'].keys()]
LOG.debug("List count, %s Servers", len(servers))
return servers
@@ -187,17 +201,27 @@
servers = self.list()
self.data['servers'] = servers
+ def save_state(self):
+ servers = self.list()
+ self.data['servers'] = {}
+ for server in servers:
+ self.data['servers'][server['id']] = server['name']
+
class ServerGroupService(ServerService):
def list(self):
client = self.server_groups_client
sgs = client.list_server_groups()['server_groups']
+ if not self.is_save_state:
+ # recreate list removing saved server_groups
+ sgs = [sg for sg in sgs if sg['id']
+ not in self.saved_state_json['server_groups'].keys()]
LOG.debug("List count, %s Server Groups", len(sgs))
return sgs
def delete(self):
- client = self.client
+ client = self.server_groups_client
sgs = self.list()
for sg in sgs:
try:
@@ -209,6 +233,12 @@
sgs = self.list()
self.data['server_groups'] = sgs
+ def save_state(self):
+ sgs = self.list()
+ self.data['server_groups'] = {}
+ for sg in sgs:
+ self.data['server_groups'][sg['id']] = sg['name']
+
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
@@ -218,6 +248,11 @@
def list(self):
client = self.client
keypairs = client.list_keypairs()['keypairs']
+ if not self.is_save_state:
+ # recreate list removing saved keypairs
+ keypairs = [keypair for keypair in keypairs
+ if keypair['keypair']['name']
+ not in self.saved_state_json['keypairs'].keys()]
LOG.debug("List count, %s Keypairs", len(keypairs))
return keypairs
@@ -235,56 +270,12 @@
keypairs = self.list()
self.data['keypairs'] = keypairs
-
-class SecurityGroupService(BaseService):
- def __init__(self, manager, **kwargs):
- super(SecurityGroupService, self).__init__(kwargs)
- self.client = manager.compute_security_groups_client
-
- def list(self):
- client = self.client
- secgrps = client.list_security_groups()['security_groups']
- secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
- LOG.debug("List count, %s Security Groups", len(secgrp_del))
- return secgrp_del
-
- def delete(self):
- client = self.client
- secgrp_del = self.list()
- for g in secgrp_del:
- try:
- client.delete_security_group(g['id'])
- except Exception:
- LOG.exception("Delete Security Groups exception.")
-
- def dry_run(self):
- secgrp_del = self.list()
- self.data['security_groups'] = secgrp_del
-
-
-class FloatingIpService(BaseService):
- def __init__(self, manager, **kwargs):
- super(FloatingIpService, self).__init__(kwargs)
- self.client = manager.compute_floating_ips_client
-
- def list(self):
- client = self.client
- floating_ips = client.list_floating_ips()['floating_ips']
- LOG.debug("List count, %s Floating IPs", len(floating_ips))
- return floating_ips
-
- def delete(self):
- client = self.client
- floating_ips = self.list()
- for f in floating_ips:
- try:
- client.delete_floating_ip(f['id'])
- except Exception:
- LOG.exception("Delete Floating IPs exception.")
-
- def dry_run(self):
- floating_ips = self.list()
- self.data['floating_ips'] = floating_ips
+ def save_state(self):
+ keypairs = self.list()
+ self.data['keypairs'] = {}
+ for keypair in keypairs:
+ keypair = keypair['keypair']
+ self.data['keypairs'][keypair['name']] = keypair
class VolumeService(BaseService):
@@ -295,6 +286,10 @@
def list(self):
client = self.client
vols = client.list_volumes()['volumes']
+ if not self.is_save_state:
+ # recreate list removing saved volumes
+ vols = [vol for vol in vols if vol['id']
+ not in self.saved_state_json['volumes'].keys()]
LOG.debug("List count, %s Volumes", len(vols))
return vols
@@ -311,6 +306,12 @@
vols = self.list()
self.data['volumes'] = vols
+ def save_state(self):
+ vols = self.list()
+ self.data['volumes'] = {}
+ for vol in vols:
+ self.data['volumes'][vol['id']] = vol['name']
+
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
@@ -320,13 +321,13 @@
def delete(self):
client = self.client
try:
- client.delete_quota_set(self.tenant_id)
+ client.delete_quota_set(self.project_id)
except Exception:
LOG.exception("Delete Volume Quotas exception.")
def dry_run(self):
quotas = self.client.show_quota_set(
- self.tenant_id, params={'usage': True})['quota_set']
+ self.project_id, params={'usage': True})['quota_set']
self.data['volume_quotas'] = quotas
@@ -339,7 +340,7 @@
def delete(self):
client = self.client
try:
- client.delete_quota_set(self.tenant_id)
+ client.delete_quota_set(self.project_id)
except Exception:
LOG.exception("Delete Quotas exception.")
@@ -350,9 +351,9 @@
# Begin network service classes
-class NetworkService(BaseService):
+class BaseNetworkService(BaseService):
def __init__(self, manager, **kwargs):
- super(NetworkService, self).__init__(kwargs)
+ super(BaseNetworkService, self).__init__(kwargs)
self.networks_client = manager.networks_client
self.subnets_client = manager.subnets_client
self.ports_client = manager.ports_client
@@ -361,6 +362,7 @@
self.metering_label_rules_client = manager.metering_label_rules_client
self.security_groups_client = manager.security_groups_client
self.routers_client = manager.routers_client
+ self.subnetpools_client = manager.subnetpools_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
@@ -369,10 +371,18 @@
return [item for item in item_list if item['network_id']
not in CONF_NETWORKS]
+
+class NetworkService(BaseNetworkService):
+
def list(self):
client = self.networks_client
networks = client.list_networks(**self.tenant_filter)
networks = networks['networks']
+
+ if not self.is_save_state:
+ # recreate list removing saved networks
+ networks = [network for network in networks if network['id']
+ not in self.saved_state_json['networks'].keys()]
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
@@ -393,18 +403,29 @@
networks = self.list()
self.data['networks'] = networks
+ def save_state(self):
+ networks = self.list()
+ self.data['networks'] = {}
+ for network in networks:
+ self.data['networks'][network['id']] = network
-class NetworkFloatingIpService(NetworkService):
+
+class NetworkFloatingIpService(BaseNetworkService):
def list(self):
client = self.floating_ips_client
flips = client.list_floatingips(**self.tenant_filter)
flips = flips['floatingips']
+
+ if not self.is_save_state:
+ # recreate list removing saved flips
+ flips = [flip for flip in flips if flip['id']
+ not in self.saved_state_json['floatingips'].keys()]
LOG.debug("List count, %s Network Floating IPs", len(flips))
return flips
def delete(self):
- client = self.client
+ client = self.floating_ips_client
flips = self.list()
for flip in flips:
try:
@@ -414,15 +435,26 @@
def dry_run(self):
flips = self.list()
- self.data['floating_ips'] = flips
+ self.data['floatingips'] = flips
+
+ def save_state(self):
+ flips = self.list()
+ self.data['floatingips'] = {}
+ for flip in flips:
+ self.data['floatingips'][flip['id']] = flip
-class NetworkRouterService(NetworkService):
+class NetworkRouterService(BaseNetworkService):
def list(self):
client = self.routers_client
routers = client.list_routers(**self.tenant_filter)
routers = routers['routers']
+
+ if not self.is_save_state:
+ # recreate list removing saved routers
+ routers = [router for router in routers if router['id']
+ not in self.saved_state_json['routers'].keys()]
if self.is_preserve:
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
@@ -450,101 +482,11 @@
routers = self.list()
self.data['routers'] = routers
-
-class NetworkHealthMonitorService(NetworkService):
-
- def list(self):
- client = self.client
- hms = client.list_health_monitors()
- hms = hms['health_monitors']
- hms = self._filter_by_tenant_id(hms)
- LOG.debug("List count, %s Health Monitors", len(hms))
- return hms
-
- def delete(self):
- client = self.client
- hms = self.list()
- for hm in hms:
- try:
- client.delete_health_monitor(hm['id'])
- except Exception:
- LOG.exception("Delete Health Monitor exception.")
-
- def dry_run(self):
- hms = self.list()
- self.data['health_monitors'] = hms
-
-
-class NetworkMemberService(NetworkService):
-
- def list(self):
- client = self.client
- members = client.list_members()
- members = members['members']
- members = self._filter_by_tenant_id(members)
- LOG.debug("List count, %s Members", len(members))
- return members
-
- def delete(self):
- client = self.client
- members = self.list()
- for member in members:
- try:
- client.delete_member(member['id'])
- except Exception:
- LOG.exception("Delete Member exception.")
-
- def dry_run(self):
- members = self.list()
- self.data['members'] = members
-
-
-class NetworkVipService(NetworkService):
-
- def list(self):
- client = self.client
- vips = client.list_vips()
- vips = vips['vips']
- vips = self._filter_by_tenant_id(vips)
- LOG.debug("List count, %s VIPs", len(vips))
- return vips
-
- def delete(self):
- client = self.client
- vips = self.list()
- for vip in vips:
- try:
- client.delete_vip(vip['id'])
- except Exception:
- LOG.exception("Delete VIP exception.")
-
- def dry_run(self):
- vips = self.list()
- self.data['vips'] = vips
-
-
-class NetworkPoolService(NetworkService):
-
- def list(self):
- client = self.client
- pools = client.list_pools()
- pools = pools['pools']
- pools = self._filter_by_tenant_id(pools)
- LOG.debug("List count, %s Pools", len(pools))
- return pools
-
- def delete(self):
- client = self.client
- pools = self.list()
- for pool in pools:
- try:
- client.delete_pool(pool['id'])
- except Exception:
- LOG.exception("Delete Pool exception.")
-
- def dry_run(self):
- pools = self.list()
- self.data['pools'] = pools
+ def save_state(self):
+ routers = self.list()
+ self.data['routers'] = {}
+ for router in routers:
+ self.data['routers'][router['id']] = router['name']
class NetworkMeteringLabelRuleService(NetworkService):
@@ -554,6 +496,11 @@
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
+
+ if not self.is_save_state:
+ saved_rules = self.saved_state_json['metering_label_rules'].keys()
+ # recreate list removing saved rules
+ rules = [rule for rule in rules if rule['id'] not in saved_rules]
LOG.debug("List count, %s Metering Label Rules", len(rules))
return rules
@@ -568,16 +515,27 @@
def dry_run(self):
rules = self.list()
- self.data['rules'] = rules
+ self.data['metering_label_rules'] = rules
+
+ def save_state(self):
+ rules = self.list()
+ self.data['metering_label_rules'] = {}
+ for rule in rules:
+ self.data['metering_label_rules'][rule['id']] = rule
-class NetworkMeteringLabelService(NetworkService):
+class NetworkMeteringLabelService(BaseNetworkService):
def list(self):
client = self.metering_labels_client
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
+
+ if not self.is_save_state:
+ # recreate list removing saved labels
+ labels = [label for label in labels if label['id']
+ not in self.saved_state_json['metering_labels'].keys()]
LOG.debug("List count, %s Metering Labels", len(labels))
return labels
@@ -592,10 +550,16 @@
def dry_run(self):
labels = self.list()
- self.data['labels'] = labels
+ self.data['metering_labels'] = labels
+
+ def save_state(self):
+ labels = self.list()
+ self.data['metering_labels'] = {}
+ for label in labels:
+ self.data['metering_labels'][label['id']] = label['name']
-class NetworkPortService(NetworkService):
+class NetworkPortService(BaseNetworkService):
def list(self):
client = self.ports_client
@@ -604,6 +568,10 @@
if port["device_owner"] == "" or
port["device_owner"].startswith("compute:")]
+ if not self.is_save_state:
+ # recreate list removing saved ports
+ ports = [port for port in ports if port['id']
+ not in self.saved_state_json['ports'].keys()]
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
@@ -623,8 +591,14 @@
ports = self.list()
self.data['ports'] = ports
+ def save_state(self):
+ ports = self.list()
+ self.data['ports'] = {}
+ for port in ports:
+ self.data['ports'][port['id']] = port['name']
-class NetworkSecGroupService(NetworkService):
+
+class NetworkSecGroupService(BaseNetworkService):
def list(self):
client = self.security_groups_client
filter = self.tenant_filter
@@ -633,31 +607,48 @@
client.list_security_groups(**filter)['security_groups']
if secgroup['name'] != 'default']
+ if not self.is_save_state:
+ # recreate list removing saved security_groups
+ secgroups = [secgroup for secgroup in secgroups if secgroup['id']
+ not in self.saved_state_json['security_groups'].keys()
+ ]
if self.is_preserve:
- secgroups = self._filter_by_conf_networks(secgroups)
+ secgroups = [secgroup for secgroup in secgroups
+ if secgroup['security_group_rules'][0]['project_id']
+ not in CONF_PROJECTS]
LOG.debug("List count, %s security_groups", len(secgroups))
return secgroups
def delete(self):
- client = self.client
+ client = self.security_groups_client
secgroups = self.list()
for secgroup in secgroups:
try:
- client.delete_secgroup(secgroup['id'])
+ client.delete_security_group(secgroup['id'])
except Exception:
LOG.exception("Delete security_group exception.")
def dry_run(self):
secgroups = self.list()
- self.data['secgroups'] = secgroups
+ self.data['security_groups'] = secgroups
+
+ def save_state(self):
+ secgroups = self.list()
+ self.data['security_groups'] = {}
+ for secgroup in secgroups:
+ self.data['security_groups'][secgroup['id']] = secgroup['name']
-class NetworkSubnetService(NetworkService):
+class NetworkSubnetService(BaseNetworkService):
def list(self):
client = self.subnets_client
subnets = client.list_subnets(**self.tenant_filter)
subnets = subnets['subnets']
+ if not self.is_save_state:
+ # recreate list removing saved subnets
+ subnets = [subnet for subnet in subnets if subnet['id']
+ not in self.saved_state_json['subnets'].keys()]
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets", len(subnets))
@@ -676,6 +667,47 @@
subnets = self.list()
self.data['subnets'] = subnets
+ def save_state(self):
+ subnets = self.list()
+ self.data['subnets'] = {}
+ for subnet in subnets:
+ self.data['subnets'][subnet['id']] = subnet['name']
+
+
+class NetworkSubnetPoolsService(BaseNetworkService):
+
+ def list(self):
+ client = self.subnetpools_client
+ pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
+ if not self.is_save_state:
+ # recreate list removing saved subnet pools
+ pools = [pool for pool in pools if pool['id']
+ not in self.saved_state_json['subnetpools'].keys()]
+ if self.is_preserve:
+ pools = [pool for pool in pools if pool['project_id']
+ not in CONF_PROJECTS]
+ LOG.debug("List count, %s Subnet Pools", len(pools))
+ return pools
+
+ def delete(self):
+ client = self.subnetpools_client
+ pools = self.list()
+ for pool in pools:
+ try:
+ client.delete_subnetpool(pool['id'])
+ except Exception:
+ LOG.exception("Delete Subnet Pool exception.")
+
+ def dry_run(self):
+ pools = self.list()
+ self.data['subnetpools'] = pools
+
+ def save_state(self):
+ pools = self.list()
+ self.data['subnetpools'] = {}
+ for pool in pools:
+ self.data['subnetpools'][pool['id']] = pool['name']
+
# begin global services
class FlavorService(BaseService):
@@ -754,12 +786,6 @@
self.data['images'][image['id']] = image['name']
-class IdentityService(BaseService):
- def __init__(self, manager, **kwargs):
- super(IdentityService, self).__init__(kwargs)
- self.client = manager.identity_v3_client
-
-
class UserService(BaseService):
def __init__(self, manager, **kwargs):
@@ -929,10 +955,7 @@
if IS_NOVA:
project_services.append(ServerService)
project_services.append(KeyPairService)
- project_services.append(SecurityGroupService)
project_services.append(ServerGroupService)
- if not IS_NEUTRON:
- project_services.append(FloatingIpService)
project_services.append(NovaQuotaService)
if IS_NEUTRON:
project_services.append(NetworkFloatingIpService)
@@ -944,6 +967,7 @@
project_services.append(NetworkSubnetService)
project_services.append(NetworkService)
project_services.append(NetworkSecGroupService)
+ project_services.append(NetworkSubnetPoolsService)
if IS_CINDER:
project_services.append(SnapshotService)
project_services.append(VolumeService)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 3e84b82..bb3fe63 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -202,8 +202,8 @@
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
- f.write(json.dumps(data,
- sort_keys=True, indent=2, separators=(',', ': ')))
+ f.write(json.dumps(data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
@@ -252,6 +252,7 @@
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
+ type=int, default=0,
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 8dcf575..081fa7a 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -78,11 +78,11 @@
import argparse
import collections
import io
-import json
import os
import re
import sys
+from oslo_serialization import jsonutils as json
import subunit
import testtools
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 375113d..1489e60 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -168,10 +168,19 @@
'imageRef': image_id,
'size': CONF.volume.volume_size}
volume = volumes_client.create_volume(**params)
- waiters.wait_for_volume_resource_status(volumes_client,
- volume['volume']['id'],
- 'available')
-
+ try:
+ waiters.wait_for_volume_resource_status(volumes_client,
+ volume['volume']['id'],
+ 'available')
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ try:
+ volumes_client.delete_volume(volume['volume']['id'])
+ volumes_client.wait_for_resource_deletion(
+ volume['volume']['id'])
+ except Exception as exc:
+ LOG.exception("Deleting volume %s failed, exception %s",
+ volume['volume']['id'], exc)
bd_map_v2 = [{
'uuid': volume['volume']['id'],
'source_type': 'volume',
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index 525110b..cd6d058 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -26,7 +26,7 @@
if project['name'] == project_name:
return project
raise lib_exc.NotFound('No such project(%s) in %s' % (project_name,
- projects))
+ projects))
def get_tenant_by_name(client, tenant_name):
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 52ccfa9..49d9742 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -98,6 +98,7 @@
def get_nic_name_by_ip(self, address):
cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
nic = self.exec_command(cmd)
+ LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
return nic.strip().strip(":").split('@')[0].lower()
def get_dns_servers(self):
diff --git a/tempest/common/utils/net_utils.py b/tempest/common/utils/net_utils.py
index 867b3dd..b697ef1 100644
--- a/tempest/common/utils/net_utils.py
+++ b/tempest/common/utils/net_utils.py
@@ -19,7 +19,6 @@
def get_unused_ip_addresses(ports_client, subnets_client,
network_id, subnet_id, count):
-
"""Return a list with the specified number of unused IP addresses
This method uses the given ports_client to find the specified number of
diff --git a/tempest/config.py b/tempest/config.py
index 716c000..dc95812 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -20,6 +20,7 @@
from oslo_concurrency import lockutils
from oslo_config import cfg
+from oslo_config import types
from oslo_log import log as logging
from tempest.lib import exceptions
@@ -365,6 +366,38 @@
"with format 'X.Y' or string 'latest'"),
]
+placement_group = cfg.OptGroup(name='placement',
+ title='Placement Service Options')
+
+PlacementGroup = [
+ cfg.StrOpt('endpoint_type',
+ default='public',
+ choices=['public', 'admin', 'internal'],
+ help="The endpoint type to use for the placement service."),
+ cfg.StrOpt('catalog_type',
+ default='placement',
+ help="Catalog type of the Placement service."),
+ cfg.StrOpt('region',
+ default='RegionOne',
+ help="The placement region name to use. If empty, the value "
+ "of [identity]/region is used instead. If no such region "
+ "is found in the service catalog, the first region found "
+ "is used."),
+ cfg.StrOpt('min_microversion',
+ default=None,
+ help="Lower version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+ cfg.StrOpt('max_microversion',
+ default=None,
+ help="Upper version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+]
+
+
compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
title="Enabled Compute Service Features")
@@ -595,6 +628,7 @@
network_group = cfg.OptGroup(name='network',
title='Network Service Options')
+ProfileType = types.Dict(types.List(types.String(), bounds=True))
NetworkGroup = [
cfg.StrOpt('catalog_type',
default='network',
@@ -658,10 +692,11 @@
" with pre-configured ports."
" Supported ports are:"
" ['normal','direct','macvtap']"),
- cfg.DictOpt('port_profile',
- default={},
- help="port profile to use when launching instances"
- " with pre-configured ports."),
+ cfg.Opt('port_profile',
+ type=ProfileType,
+ default={},
+ help="port profile to use when launching instances"
+ " with pre-configured ports."),
cfg.ListOpt('default_network',
default=["1.0.0.0/16", "2.0.0.0/16"],
help="List of ip pools"
@@ -1096,6 +1131,7 @@
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
+ (placement_group, PlacementGroup),
(None, DefaultGroup)
]
@@ -1213,7 +1249,7 @@
logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
if ((not hasattr(_CONF, 'log_config_append') or
- _CONF.log_config_append is None) and
+ _CONF.log_config_append is None) and
os.path.isfile(logging_cfg_path)):
# if logging conf is in place we need to set log_config_append
_CONF.log_config_append = logging_cfg_path
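The port_profile option above moves from DictOpt to an Opt using the nested ProfileType, so values in the dict can themselves be lists of strings. A small sketch of the kind of value the option is meant to carry; the 'capabilities'/'switchdev' content is only an assumed example, not something the change itself mandates:

from oslo_config import types

# Same nested type as above: a dict whose values are lists of strings;
# bounds=True means list values are written in brackets in tempest.conf.
ProfileType = types.Dict(types.List(types.String(), bounds=True))

# Assumed example of an SR-IOV style port profile the option could hold.
example_port_profile = {'capabilities': ['switchdev']}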
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 82fcd0b..71ecb32 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -110,7 +110,7 @@
for item in files:
if item.endswith('.py'):
module_name = '.'.join((root_package,
- os.path.splitext(item)[0]))
+ os.path.splitext(item)[0]))
if not module_name.startswith(UNIT_TESTS_EXCLUDE):
modules.append(module_name)
return modules
@@ -233,8 +233,8 @@
if self._is_test_case(module, node))
for node in test_cases:
for subnode in filter(self._is_test_method, node.body):
- test_name = '%s.%s' % (node.name, subnode.name)
- tests[module_name]['tests'][test_name] = subnode
+ test_name = '%s.%s' % (node.name, subnode.name)
+ tests[module_name]['tests'][test_name] = subnode
return tests
@staticmethod
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index bcb076b..709c319 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -54,7 +54,7 @@
config_min_version = api_version_request.APIVersionRequest(cfg_min_version)
config_max_version = api_version_request.APIVersionRequest(cfg_max_version)
if ((min_version > max_version) or
- (config_min_version > config_max_version)):
+ (config_min_version > config_max_version)):
msg = ("Test Class versions [%s - %s]. "
"Configuration versions [%s - %s]."
% (min_version.get_string(),
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index fcdeb17..1011504 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -273,7 +273,7 @@
# NOTE(andreaf) Not all fields may be available on all credentials
# so defaulting to None for that case.
if all([getattr(creds, k, None) == hash_attributes.get(k, None) for
- k in init_attributes]):
+ k in init_attributes]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index ec46caf..3be441e 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -282,7 +282,7 @@
def get(self, url, headers=None, extra_headers=False):
"""Send a HTTP GET request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the get request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
@@ -297,7 +297,7 @@
def delete(self, url, headers=None, body=None, extra_headers=False):
"""Send a HTTP DELETE request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the delete request to
:param dict headers: The headers to use for the request
:param dict body: the request body
:param bool extra_headers: Boolean value than indicates if the headers
@@ -313,7 +313,7 @@
def patch(self, url, body, headers=None, extra_headers=False):
"""Send a HTTP PATCH request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the patch request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
@@ -329,7 +329,7 @@
def put(self, url, body, headers=None, extra_headers=False, chunked=False):
"""Send a HTTP PUT request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the put request to
:param dict body: the request body
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
@@ -346,7 +346,7 @@
def head(self, url, headers=None, extra_headers=False):
"""Send a HTTP HEAD request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the head request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
@@ -361,7 +361,7 @@
def copy(self, url, headers=None, extra_headers=False):
"""Send a HTTP COPY request using keystone service catalog and auth
- :param str url: the relative url to send the post request to
+ :param str url: the relative url to send the copy request to
:param dict headers: The headers to use for the request
:param bool extra_headers: Boolean value than indicates if the headers
returned by the get_headers() method are to
@@ -374,7 +374,7 @@
return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
- """Get the versions on a endpoint from the keystone catalog
+ """Get the versions on an endpoint from the keystone catalog
This method will make a GET request on the baseurl from the keystone
catalog to return a list of API versions. It is expected that a GET
@@ -526,7 +526,7 @@
if (resp.status == 205 and
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
- raise exceptions.ResponseWithEntity()
+ raise exceptions.ResponseWithEntity()
# NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 3483c51..7f94612 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -170,7 +170,7 @@
:rtype: string
"""
return b''.join([six.int2byte(random.randint(0, 255))
- for i in range(size)])
+ for i in range(size)])
# Courtesy of http://stackoverflow.com/a/312464
diff --git a/tempest/lib/common/utils/misc.py b/tempest/lib/common/utils/misc.py
index 2b0fcd5..a0b0c0a 100644
--- a/tempest/lib/common/utils/misc.py
+++ b/tempest/lib/common/utils/misc.py
@@ -12,9 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
def singleton(cls):
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index b399aa0..4064401 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -136,10 +136,17 @@
This decorator applies the testtools.testcase.attr if it is in the list of
attributes to testtools we want to apply.
+
+ :param condition: Optional condition which, if true, will apply the attr.
+ If the condition is false, the attr will not be applied to the test
+ function. If not specified, the attr is always applied.
"""
def decorator(f):
- if 'type' in kwargs and isinstance(kwargs['type'], str):
+ # Check to see if the attr should be conditionally applied.
+ if 'condition' in kwargs and not kwargs.get('condition'):
+ return f
+ if 'type' in kwargs and isinstance(kwargs['type'], six.string_types):
f = testtools.testcase.attr(kwargs['type'])(f)
elif 'type' in kwargs and isinstance(kwargs['type'], list):
for attr in kwargs['type']:
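A brief usage sketch of the new condition argument (the False literal stands in for whatever config-driven condition a real test would use): when the condition is false the decorator returns the function unchanged, so no attr is applied.

import testtools

from tempest.lib import decorators


class ExampleTest(testtools.TestCase):

    # Applied unconditionally, as before.
    @decorators.attr(type='slow')
    def test_always_tagged_slow(self):
        pass

    # Tagged 'slow' only when the condition is true; with condition=False
    # the test keeps its default attributes.
    @decorators.attr(type='slow', condition=False)
    def test_conditionally_tagged(self):
        pass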
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 833cfd6..90debd9 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -18,7 +18,6 @@
import importlib
import inspect
import sys
-import warnings
from debtcollector import removals
from oslo_log import log as logging
@@ -32,9 +31,9 @@
from tempest.lib.services import image
from tempest.lib.services import network
from tempest.lib.services import object_storage
+from tempest.lib.services import placement
from tempest.lib.services import volume
-warnings.simplefilter("once")
LOG = logging.getLogger(__name__)
@@ -46,6 +45,7 @@
"""
return {
'compute': compute,
+ 'placement': placement,
'identity.v2': identity.v2,
'identity.v3': identity.v3,
'image.v1': image.v1,
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index 2fad0a4..5d2dd46 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -172,7 +172,7 @@
https://developer.openstack.org/api-ref/compute/#show-an-extra-spec-for-a-flavor
"""
resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
- key))
+ key))
body = json.loads(body)
self.validate_response(
schema_extra_specs.set_get_flavor_extra_specs_key,
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 9eed4b3..18e08cc 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -636,7 +636,7 @@
def list_virtual_interfaces(self, server_id):
"""List the virtual interfaces used in an instance."""
resp, body = self.get('/'.join(['servers', server_id,
- 'os-virtual-interfaces']))
+ 'os-virtual-interfaces']))
body = json.loads(body)
self.validate_response(schema.list_virtual_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index b1d298b..94da043 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -74,6 +74,7 @@
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
scheme = scheme.lower()
netloc = netloc.lower()
+ path = path.replace('//', '/')
normalized_uri = urlparse.urlunparse((scheme, netloc, path,
params, '', ''))
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
new file mode 100644
index 0000000..5c20c57
--- /dev/null
+++ b/tempest/lib/services/placement/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement.placement_client import \
+ PlacementClient
+
+__all__ = ['PlacementClient']
diff --git a/tempest/lib/services/placement/base_placement_client.py b/tempest/lib/services/placement/base_placement_client.py
new file mode 100644
index 0000000..505a515
--- /dev/null
+++ b/tempest/lib/services/placement/base_placement_client.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common import api_version_utils
+from tempest.lib.common import rest_client
+
+PLACEMENT_MICROVERSION = None
+
+
+class BasePlacementClient(rest_client.RestClient):
+
+ api_microversion_header_name = 'OpenStack-API-Version'
+ version_header_value = 'placement %s'
+
+ def get_headers(self):
+ headers = super(BasePlacementClient, self).get_headers()
+ if PLACEMENT_MICROVERSION:
+ headers[self.api_microversion_header_name] = \
+ self.version_header_value % PLACEMENT_MICROVERSION
+ return headers
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, chunked=False):
+ resp, resp_body = super(BasePlacementClient, self).request(
+ method, url, extra_headers, headers, body, chunked)
+ if (PLACEMENT_MICROVERSION and
+ PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
+ api_version_utils.assert_version_header_matches_request(
+ self.api_microversion_header_name,
+ self.version_header_value % PLACEMENT_MICROVERSION,
+ resp)
+ return resp, resp_body
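A short sketch of what the header handling above produces; the '1.10' value is only an example microversion:

from tempest.lib.services.placement import base_placement_client

# The microversion is selected by mutating the module-level constant,
# mirroring how other tempest service clients handle microversions.
base_placement_client.PLACEMENT_MICROVERSION = '1.10'
# get_headers() on any BasePlacementClient instance then includes:
#     {'OpenStack-API-Version': 'placement 1.10'}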
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
new file mode 100644
index 0000000..2c6d919
--- /dev/null
+++ b/tempest/lib/services/placement/placement_client.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class PlacementClient(base_placement_client.BasePlacementClient):
+
+ def list_allocation_candidates(self, **params):
+ """List allocation candidates.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/placement/#list-allocation-candidates
+ """
+ url = '/allocation_candidates'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_allocations(self, consumer_uuid):
+ """List all allocation records for the consumer.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/placement/#list-allocations
+ """
+ url = '/allocations/%s' % consumer_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
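A hedged usage sketch of the new client: the constructor arguments follow the generic RestClient signature, auth_provider and server_id are placeholders, and the 'resources' query string is only an illustrative placement API filter:

from tempest.lib.services.placement import placement_client

# auth_provider would normally come from a tempest credential manager.
client = placement_client.PlacementClient(auth_provider, 'placement',
                                          'RegionOne')

# Ask placement for candidates that can fit one VCPU and 512 MB of RAM.
candidates = client.list_allocation_candidates(
    resources='VCPU:1,MEMORY_MB:512')

# Allocations held by a server, keyed by resource provider UUID.
allocations = client.list_allocations(server_id)['allocations']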
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 46395e1..d2fd021 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -187,7 +187,7 @@
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
+ for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2b35e45..cee543b 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -48,6 +48,7 @@
10. Check SSH connection to instance after reboot
"""
+
def nova_show(self, server):
got_server = (self.servers_client.show_server(server['id'])
['server'])
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 4be2b29..37bcd04 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -119,6 +119,7 @@
server, keypair, floating_ip)
@decorators.idempotent_id('7b6860c2-afa3-4846-9522-adeb38dfbe08')
+ @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_server_connectivity_reboot(self):
keypair = self.create_keypair()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c11070c..7992585 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -579,10 +579,17 @@
initial_dns_server = '1.2.3.4'
alt_dns_server = '9.8.7.6'
- # renewal should be immediate.
- # Timeouts are suggested by salvatore-orlando in
+ # Original timeouts are suggested by salvatore-orlando in
# https://bugs.launchpad.net/neutron/+bug/1412325/comments/3
- renew_delay = CONF.network.build_interval
+ #
+ # Compared to that, renew_delay was increased because busybox's
+ # udhcpc accepts SIGUSR1 as a renew request and internally goes
+ # into the RENEW_REQUESTED state. If it receives a second SIGUSR1
+ # signal while in that state, it calls the deconfig script
+ # ("/sbin/cirros-dhcpc deconfig" in sufficiently new cirros versions),
+ # which transiently deconfigures the address, which is unwanted
+ # in our case.
+ renew_delay = 3 * CONF.network.build_interval
renew_timeout = CONF.network.build_timeout
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 1671216..02bc692 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+from oslo_serialization import jsonutils as json
from tempest.common import utils
from tempest.common import waiters
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 810480b..6ed7e30 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -11,7 +11,7 @@
# under the License.
from oslo_log import log as logging
-from oslo_serialization import jsonutils
+from oslo_serialization import jsonutils as json
import testtools
from tempest.common import utils
@@ -76,6 +76,7 @@
self.snapshots_client.wait_for_resource_deletion(snapshot_id)
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
+ @decorators.attr(type='slow')
# Note: This test is being skipped based on 'public_network_id'.
# It is being used in create_floating_ip() method which gets called
# from get_server_ip() method
@@ -85,7 +86,6 @@
'Cinder volume snapshots are disabled')
@utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
-
"""This test case attempts to reproduce the following steps:
* Create in Cinder some bootable volume importing a Glance image
@@ -264,7 +264,7 @@
bdms = image.get('block_device_mapping')
if not bdms:
bdms = image['properties']['block_device_mapping']
- bdms = jsonutils.loads(bdms)
+ bdms = json.loads(bdms)
snapshot_id = bdms[0]['snapshot_id']
self._delete_snapshot(snapshot_id)
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 9c18052..7a037eb 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -179,6 +179,7 @@
This class is used to manage the lifecycle of external tempest test
plugins. It provides functions for getting set
"""
+
def __init__(self):
self.ext_plugins = stevedore.ExtensionManager(
'tempest.test_plugins', invoke_on_load=True,
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 330f370..143c6e1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -37,7 +37,7 @@
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
- top_level_dir=base_path))
+ top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
new file mode 100644
index 0000000..b47da0b
--- /dev/null
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -0,0 +1,26 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.cmd import cleanup
+from tempest.tests import base
+
+
+class TestTempestCleanup(base.TestCase):
+
+ def test_load_json(self):
+ # instantiate "empty" TempestCleanup
+ c = cleanup.TempestCleanup(None, None, 'test')
+ test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
+ # test if the file is loaded without any issues/exceptions
+ c._load_json(test_saved_json)
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 495d127..3262b1c 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -25,6 +25,24 @@
from tempest.tests.lib import fake_http
+class TestBaseService(base.TestCase):
+
+ def test_base_service_init(self):
+ kwargs = {'data': {'data': 'test'},
+ 'is_dry_run': False,
+ 'saved_state_json': {'saved': 'data'},
+ 'is_preserve': False,
+ 'is_save_state': True,
+ 'tenant_id': 'project_id'}
+ base = cleanup_service.BaseService(kwargs)
+ self.assertEqual(base.data, kwargs['data'])
+ self.assertFalse(base.is_dry_run)
+ self.assertEqual(base.saved_state_json, kwargs['saved_state_json'])
+ self.assertFalse(base.is_preserve)
+ self.assertTrue(base.is_save_state)
+ self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
+
+
class MockFunctionsBase(base.TestCase):
def _create_response(self, body, status, headers):
@@ -81,16 +99,50 @@
"images": cleanup_service.CONF_IMAGES[0],
"projects": cleanup_service.CONF_PROJECTS[0],
"users": cleanup_service.CONF_USERS[0],
+ "networks": cleanup_service.CONF_PUB_NETWORK,
+ "security_groups":
+ cleanup_service.CONF_PROJECTS[0],
+ "ports": cleanup_service.CONF_PUB_NETWORK,
+ "routers": cleanup_service.CONF_PUB_ROUTER,
+ "subnetpools": cleanup_service.CONF_PROJECTS[0],
}
- # Static list to ensure global service saved items are not deleted
- saved_state = {"users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
- "flavors": {u'42': u'm1.tiny'},
- "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
- "roles": {u'3efrt74r45hn': u'president'},
- "projects": {u'f38ohgp93jj032': u'manhattan'},
- "domains": {u'default': u'Default'}
- }
+ saved_state = {
+ # Static list to ensure global service saved items are not deleted
+ "users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
+ "flavors": {u'42': u'm1.tiny'},
+ "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
+ "roles": {u'3efrt74r45hn': u'president'},
+ "projects": {u'f38ohgp93jj032': u'manhattan'},
+ "domains": {u'default': u'Default'},
+ # Static list to ensure project service saved items are not deleted
+ "snapshots": {u'1ad4c789-7e8w-4dwg-afc5': u'saved-snapshot'},
+ "servers": {u'7a6d4v7w-36ds-4216': u'saved-server'},
+ "server_groups": {u'as6d5f7g-46ca-475e': u'saved-server-group'},
+ "keypairs": {u'saved-key-pair': {
+ u'fingerprint': u'7e:eb:ab:24',
+ u'name': u'saved-key-pair'
+ }},
+ "volumes": {u'aa77asdf-1234': u'saved-volume'},
+ "networks": {u'6722fc13-4319': {
+ u'id': u'6722fc13-4319',
+ u'name': u'saved-network'
+ }},
+ "floatingips": {u'9e82d248-408a': {
+ u'id': u'9e82d248-408a',
+ u'status': u'ACTIVE'
+ }},
+ "routers": {u'4s5w34hj-id44': u'saved-router'},
+ "metering_label_rules": {u'93a973ce-4dc5': {
+ u'direction': u'ingress',
+ u'id': u'93a973ce-4dc5'
+ }},
+ "metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
+ "ports": {u'aa74aa4v-741a': u'saved-port'},
+ "security_groups": {u'7q844add-3697': u'saved-sec-group'},
+ "subnets": {u'55ttda4a-2584': u'saved-subnet'},
+ "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'}
+ }
# Mocked methods
get_method = 'tempest.lib.common.rest_client.RestClient.get'
delete_method = 'tempest.lib.common.rest_client.RestClient.delete'
@@ -120,7 +172,9 @@
mocked_fixture_tuple_list,
)
for fixture in fixtures:
- if fail is False and fixture.mock.return_value == 'exception':
+ if fixture.mock.return_value == 'validate':
+ fixture.mock.assert_called()
+ elif fail is False and fixture.mock.return_value == 'exception':
fixture.mock.assert_not_called()
elif self.service_name in self.saved_state.keys():
fixture.mock.assert_called_once()
@@ -172,6 +226,948 @@
self.assertNotIn(rsp['name'], self.conf_values.values())
+class TestSnapshotService(BaseCmdServiceTests):
+
+ service_class = 'SnapshotService'
+ service_name = 'snapshots'
+ response = {
+ "snapshots": [
+ {
+ "status": "available",
+ "metadata": {
+ "name": "test"
+ },
+ "name": "test-volume-snapshot",
+ "user_id": "40c2102f4a554b848d96b14f3eec39ed",
+ "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
+ "created_at": "2015-11-29T02:25:51.000000",
+ "size": 1,
+ "updated_at": "2015-11-20T05:36:40.000000",
+ "os-extended-snapshot-attributes:progress": "100%",
+ "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
+ "description": "volume snapshot"
+ },
+ {
+ "status": "available",
+ "name": "saved-snapshot",
+ "id": "1ad4c789-7e8w-4dwg-afc5",
+ "description": "snapshot in saved state"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestServerService(BaseCmdServiceTests):
+
+ service_class = 'ServerService'
+ service_name = 'servers'
+ response = {
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70-6ef0985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e7-6ef35",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ },
+ {
+ "id": "7a6d4v7w-36ds-4216",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70-6ef0985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e7-6ef35",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "saved-server"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestServerGroupService(BaseCmdServiceTests):
+
+ service_class = 'ServerGroupService'
+ service_name = 'server_groups'
+ validate_response = ('tempest.lib.services.compute.server_groups_client'
+ '.ServerGroupsClient.validate_response')
+
+ response = {
+ "server_groups": [
+ {
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "name": "test",
+ "policy": "anti-affinity",
+ "rules": {"max_server_per_host": 3},
+ "members": [],
+ "project_id": "6f70656e737461636b20342065766572",
+ "user_id": "fake"
+ },
+ {
+ "id": "as6d5f7g-46ca-475e",
+ "name": "saved-server-group"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None)
+ ])
+
+
+class TestKeyPairService(BaseCmdServiceTests):
+
+ service_class = 'KeyPairService'
+ service_name = 'keypairs'
+ validate_response = ('tempest.lib.services.compute.keypairs_client'
+ '.KeyPairsClient.validate_response')
+ response = {
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:bd",
+ "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF\n"
+ }
+ },
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24",
+ "name": "saved-key-pair"
+ }
+ }
+ ]
+ }
+
+ def _test_saved_state_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_save_state=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
+ for item in self.response[self.service_name]:
+ self.assertIn(item['keypair']['name'],
+ serv.data[self.service_name])
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([
+ (self.get_method, self.response, 200),
+ (self.validate_response, 'validate', None)
+ ])
+
+
+class TestVolumeService(BaseCmdServiceTests):
+
+ service_class = 'VolumeService'
+ service_name = 'volumes'
+ response = {
+ "volumes": [
+ {
+ "id": "efa54464-8fab-47cd-a05a-be3e6b396188",
+ "links": [
+ {
+ "href": "http://127.0.0.1:37097/v3/89af/volumes/efa54",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:37097/89af/volumes/efa54464",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "volume-name"
+ },
+ {
+ "id": "aa77asdf-1234",
+ "name": "saved-volume"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+# Begin network service classes
+class TestNetworkService(BaseCmdServiceTests):
+
+ service_class = 'NetworkService'
+ service_name = 'networks'
+ response = {
+ "networks": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "l2_adjacency": False,
+ "mtu": 1500,
+ "name": "net1",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": False,
+ "shared": False,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": True,
+ "description": "",
+ "is_default": False
+ },
+ {
+ "id": "6722fc13-4319",
+ "name": "saved-network"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['networks'].append(
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2017-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": cleanup_service.CONF_PUB_NETWORK,
+ "name": "net2",
+ "port_security_enabled": True,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2018-03-08T20:19:41",
+ "vlan_transparent": True,
+ "is_default": False
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkFloatingIpService(BaseCmdServiceTests):
+
+ service_class = 'NetworkFloatingIpService'
+ service_name = 'floatingips'
+ response = {
+ "floatingips": [
+ {
+ "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
+ "description": "for test",
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myfip",
+ "created_at": "2016-12-21T10:55:50Z",
+ "updated_at": "2016-12-21T10:55:53Z",
+ "revision_number": 1,
+ "project_id": "4969c491a3c74ee4af974e6d800c62de",
+ "tenant_id": "4969c491a3c74ee4af974e6d800c62de",
+ "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
+ "fixed_ip_address": "10.0.0.3",
+ "floating_ip_address": "172.24.4.228",
+ "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
+ "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+ "status": "ACTIVE",
+ "port_details": {
+ "status": "ACTIVE",
+ "name": "",
+ "admin_state_up": True,
+ "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+ "device_owner": "compute:nova",
+ "mac_address": "fa:16:3e:b1:3b:30",
+ "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+ },
+ "tags": ["tag1,tag2"],
+ "port_forwardings": []
+ },
+ {
+ "id": "9e82d248-408a",
+ "status": "ACTIVE"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkRouterService(BaseCmdServiceTests):
+
+ service_class = 'NetworkRouterService'
+ service_name = 'routers'
+ validate_response = ('tempest.lib.services.network.routers_client'
+ '.RoutersClient.validate_response')
+ response = {
+ "routers": [
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": False,
+ "external_gateway_info": {
+ "enable_snat": True,
+ "external_fixed_ips": [
+ {
+ "ip_address": "172.24.4.3",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
+ },
+ {
+ "ip_address": "2001:db8::c",
+ "subnet_id": "0c56df5d-ace5-46c8-8f4c-45fa4e334d18"
+ }
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
+ },
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": False,
+ "id": "915a14a6-867b-4af7-83d1-70efceb146f9",
+ "name": "router2",
+ "revision_number": 1,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "tags": ["tag1,tag2"]
+ },
+ {
+ "id": "4s5w34hj-id44",
+ "name": "saved-router"
+ }
+ ],
+ # "ports" key is added to the response in order to simplify unit
+ # testing - it's because NetworkRouterService's delete method lists
+ # ports before deleting any router
+ "ports": []
+ }
+
+ def _test_delete(self, mocked_fixture_tuple_list, fail=False):
+ serv = self._create_cmd_service(self.service_class)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list,
+ )
+ for fixture in fixtures:
+ if fail is False and fixture.mock.return_value == 'exception':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ self.assertFalse(serv.data)
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['routers'].append(
+ {
+ "admin_state_up": True,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "id": cleanup_service.CONF_PUB_ROUTER,
+ "name": "router-preserve",
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "tags": ["tag1,tag2"]
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkMeteringLabelRuleService(BaseCmdServiceTests):
+
+ service_class = 'NetworkMeteringLabelRuleService'
+ service_name = 'metering_label_rules'
+ response = {
+ "metering_label_rules": [
+ {
+ "remote_ip_prefix": "20.0.0.0/24",
+ "direction": "ingress",
+ "metering_label_id": "e131d186-b02d-4c0b-83d5-0c0725c4f812",
+ "id": "9536641a-7d14-4dc5-afaf-93a973ce0eb8",
+ "excluded": False
+ },
+ {
+ "direction": "ingress",
+ "id": "93a973ce-4dc5"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkMeteringLabelService(BaseCmdServiceTests):
+
+ service_class = 'NetworkMeteringLabelService'
+ service_name = 'metering_labels'
+ response = {
+ "metering_labels": [
+ {
+ "project_id": "45345b0ee1ea477fac0f541b2cb79cd4",
+ "tenant_id": "45345b0ee1ea477fac0f541b2cb79cd4",
+ "description": "label1 description",
+ "name": "label1",
+ "id": "a6700594-5b7a-4105-8bfe-723b346ce866",
+ "shared": False
+ },
+ {
+ "name": "saved-label",
+ "id": "723b346ce866-4c7q",
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkPortService(BaseCmdServiceTests):
+
+ service_class = 'NetworkPortService'
+ service_name = 'ports'
+ response = {
+ "ports": [
+ {
+ "admin_state_up": True,
+ "allowed_address_pairs": [],
+ "created_at": "2016-03-08T20:19:41",
+ "description": "",
+ "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
+ "device_owner": "",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "172.24.4.2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 4,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "172.24.4.2",
+ "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062"
+ }
+ ],
+ "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:58:42:ed",
+ "name": "test_port",
+ "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
+ "project_id": "",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "port_security_enabled": False
+ },
+ {
+ "id": "aa74aa4v-741a",
+ "name": "saved-port",
+ "device_owner": ""
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['ports'].append(
+ {
+ "created_at": "2018-03-08T20:19:41",
+ "description": "",
+ "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
+ "device_owner": "compute:router_gateway",
+ "id": "d80b1a3b-4fc1-49f3-952e-1fdy1ws542",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:58:42:ed",
+ "name": "preserve_port",
+ "network_id": cleanup_service.CONF_PUB_NETWORK,
+ "project_id": "",
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "",
+ "updated_at": "2018-03-08T20:19:41",
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSecGroupService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSecGroupService'
+ service_name = 'security_groups'
+ response = {
+ "security_groups": [
+ {
+ "description": "default",
+ "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+ "name": "test",
+ "security_group_rules": [
+ {
+ "direction": "egress",
+ "ethertype": "IPv6",
+ "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+ "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
+ "project_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "revision_number": 1,
+ "tags": ["tag1,tag2"],
+ "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "created_at": "2018-03-19T19:16:56Z",
+ "updated_at": "2018-03-19T19:16:56Z",
+ "description": ""
+ }
+ ]
+ },
+ {
+ "id": "7q844add-3697",
+ "name": "saved-sec-group"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['security_groups'].append(
+ {
+ "description": "default",
+ "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
+ "name": "test",
+ "security_group_rules": [
+ {
+ "direction": "egress",
+ "ethertype": "IPv6",
+ "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
+ "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
+ "project_id": cleanup_service.CONF_PROJECTS[0],
+ "revision_number": 1,
+ "tags": ["tag1,tag2"],
+ "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
+ "created_at": "2018-03-19T19:16:56Z",
+ "updated_at": "2018-03-19T19:16:56Z",
+ "description": ""
+ }
+ ]
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSubnetService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSubnetService'
+ service_name = 'subnets'
+ response = {
+ "subnets": [
+ {
+ "name": "private-subnet",
+ "enable_dhcp": True,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "allocation_pools": [
+ {
+ "start": "10.0.0.2",
+ "end": "10.0.0.254"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "revision_number": 2,
+ "service_types": [],
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ },
+ {
+ "id": "55ttda4a-2584",
+ "name": "saved-subnet"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['subnets'].append(
+ {
+ "name": "public-subnet",
+ "network_id": cleanup_service.CONF_PUB_NETWORK,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2018-10-10T14:35:34Z",
+ "service_types": [],
+ "tags": ["tag1,tag2"],
+ "updated_at": "2018-10-10T14:35:34Z"
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestNetworkSubnetPoolsService(BaseCmdServiceTests):
+
+ service_class = 'NetworkSubnetPoolsService'
+ service_name = 'subnetpools'
+ response = {
+ "subnetpools": [
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "03f761e6-eee0-43fc-a921-8acf64c14988",
+ "max_prefixlen": "64",
+ "name": "my-subnet-pool-ipv6",
+ "is_default": False,
+ "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
+ "prefixes": [
+ "2001:db8:0:2::/64",
+ "2001:db8::/63"
+ ],
+ "ip_version": 6,
+ "shared": False,
+ "description": "",
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41",
+ "revision_number": 2,
+ "tags": ["tag1,tag2"]
+ },
+ {
+ "id": "8acf64c1-43fc",
+ "name": "saved-subnet-pool"
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['subnetpools'].append(
+ {
+ "min_prefixlen": "64",
+ "default_prefixlen": "64",
+ "id": "9acf64c1-43fc",
+ "name": "preserve-pool",
+ "project_id": cleanup_service.CONF_PROJECTS[0],
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41"
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+# begin global services
class TestDomainService(BaseCmdServiceTests):
service_class = 'DomainService'
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index e159cdc..00f8bc5 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -108,6 +108,27 @@
subprocess.call(['stestr', 'init'])
self.assertRunExit(['tempest', 'run', '--regex', 'passing'], 0)
+ def test_tempest_run_failing(self):
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_blackregex_failing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_passing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
+ def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
def test_tempest_run_fails(self):
self.assertRunExit(['tempest', 'run'], 1)
@@ -136,7 +157,7 @@
whitelist_file.write('passing'.encode('utf-8'))
self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path], 0)
- def test_tempest_run_with_whitelist_with_regex(self):
+ def test_tempest_run_with_whitelist_regex_include_pass_check_fail(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
whitelist_file = os.fdopen(fd, 'wb', 0)
@@ -145,11 +166,72 @@
self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
'--regex', 'fail'], 1)
+ def test_tempest_run_with_whitelist_regex_include_pass_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ whitelist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(whitelist_file.close)
+ whitelist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ '--regex', 'passing'], 0)
+
+ def test_tempest_run_with_whitelist_regex_include_fail_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ whitelist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(whitelist_file.close)
+ whitelist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ '--regex', 'pass'], 1)
+
def test_tempest_run_passes_with_config_file(self):
self.assertRunExit(['tempest', 'run',
'--config-file', self.stestr_conf_file,
'--regex', 'passing'], 0)
+ def test_tempest_run_with_blacklist_failing(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 0)
+
+ def test_tempest_run_with_blacklist_passing(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 1)
+
+ def test_tempest_run_with_blacklist_regex_exclude_fail_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('failing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'pass'], 0)
+
+ def test_tempest_run_with_blacklist_regex_exclude_pass_check_pass(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'pass'], 1)
+
+ def test_tempest_run_with_blacklist_regex_exclude_pass_check_fail(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ blacklist_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(blacklist_file.close)
+ blacklist_file.write('passing'.encode('utf-8'))
+ self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ '--regex', 'fail'], 1)
+
class TestConfigPathCheck(base.TestCase):
def setUp(self):
diff --git a/tempest/tests/cmd/test_saved_state_json.json b/tempest/tests/cmd/test_saved_state_json.json
new file mode 100644
index 0000000..5c55331
--- /dev/null
+++ b/tempest/tests/cmd/test_saved_state_json.json
@@ -0,0 +1,16 @@
+{
+ "domains": {
+ "default": "Default"
+ },
+ "flavors": {
+ "1": "m1.tiny"
+ },
+ "images": {},
+ "projects": {
+ "268bcb63488b4aa2942ecaac0f85ed62": "demo"
+ },
+ "roles": {},
+ "users": {
+ "023e65a5922a454585a91c6af8310968": "demo"
+ }
+}
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index 65481de..7a6b576 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -48,7 +48,7 @@
stdout, stderr = process.communicate()
return_code = process.returncode
msg = ("%s failed with:\nstdout: %s\nstderr: %s" % (' '.join(cmd),
- stdout, stderr))
+ stdout, stderr))
self.assertEqual(return_code, expected, msg)
def test_run_workspace_list(self):
@@ -133,12 +133,89 @@
"None or empty name is specified."
" Please specify correct name for workspace.\n")
+ def test_workspace_manager_rename_with_existing_name(self):
+ new_name = self.name
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ self.name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace already exists with name: %s.\n"
+ % new_name)
+
+ def test_workspace_manager_rename_no_exist_old_name(self):
+ old_name = ""
+ new_name = data_utils.rand_uuid()
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ old_name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % old_name)
+
+ def test_workspace_manager_rename_integer_data(self):
+ old_name = self.name
+ new_name = 12345
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
+ def test_workspace_manager_rename_alphanumeric_data(self):
+ old_name = self.name
+ new_name = 'abc123'
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
def test_workspace_manager_move(self):
new_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
self.workspace_manager.move_workspace(self.name, new_path)
self.assertEqual(
self.workspace_manager.get_workspace(self.name), new_path)
+ # NOTE(mbindlish): Also check that the workspace no longer
+ # exists at the old path
+ self.assertNotEqual(
+ self.workspace_manager.get_workspace(self.name), self.path)
+
+ def test_workspace_manager_move_wrong_path(self):
+ new_path = 'wrong/path'
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ self.name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "Path does not exist.\n")
+
+ def test_workspace_manager_move_wrong_workspace(self):
+ workspace_name = "wrong_workspace_name"
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
+
+ def test_workspace_manager_move_no_workspace_name(self):
+ workspace_name = ""
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
def test_workspace_manager_move_no_workspace_path(self):
new_path = ""
@@ -155,6 +232,30 @@
self.workspace_manager.remove_workspace_entry(self.name)
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
+ def test_workspace_manager_remove_entry_no_name(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ no_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % no_name)
+
+ def test_workspace_manager_remove_entry_wrong_name(self):
+ wrong_name = "wrong_name"
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ wrong_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % wrong_name)
+
def test_workspace_manager_remove_directory(self):
path = self.workspace_manager.remove_workspace_entry(self.name)
self.workspace_manager.remove_workspace_directory(path)
@@ -188,8 +289,11 @@
nonexistent_name)
self.assertEqual(1, ex.code)
self.assertEqual(mock_stdout.getvalue(),
- "A workspace was not found with name: %s\n" %
- nonexistent_name)
+ "A workspace was not found with name: %s\n"
+ % nonexistent_name)
+
+ def test_workspace_name_exists(self):
+ self.assertIsNone(self.workspace_manager._name_exists(self.name))
def test_workspace_name_already_exists(self):
duplicate_name = self.name
@@ -203,6 +307,11 @@
"A workspace already exists with name: %s.\n"
% duplicate_name)
+ def test_workspace_name_exists_check_new_name(self):
+ new_name = "fake_name"
+ self.assertIsNone(self.workspace_manager.
+ _workspace_name_exists(new_name))
+
def test_workspace_manager_path_not_exist(self):
fake_path = "fake_path"
with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
@@ -213,6 +322,11 @@
self.assertEqual(mock_stdout.getvalue(),
"Path does not exist.\n")
+ def test_validate_path_exists(self):
+ new_path = self.path
+ self.assertIsNone(self.workspace_manager.
+ _validate_path(new_path))
+
def test_workspace_manager_list_workspaces(self):
listed = self.workspace_manager.list_workspaces()
self.assertEqual(1, len(listed))
@@ -242,3 +356,21 @@
self.assertEqual(mock_stdout.getvalue(),
"None or empty path is specified for workspace."
" Please specify correct workspace path.\n")
+
+ def test_register_new_workspace_integer_data(self):
+ workspace_name = 12345
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
+
+ def test_register_new_workspace_alphanumeric_data(self):
+ workspace_name = 'abc123'
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index ebcf5d1..4723458 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -109,8 +109,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': id, 'name': name},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '2', 'name': 'Member'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_2_roles(self):
@@ -120,8 +120,8 @@
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': '1234', 'name': 'role1'},
- {'id': '1', 'name': 'FakeRole'},
- {'id': '12345', 'name': 'role2'}]}))))
+ {'id': '1', 'name': 'FakeRole'},
+ {'id': '12345', 'name': 'role2'}]}))))
return roles_fix
def _mock_assign_user_role(self):
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index 4c0bb57..b861582 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -13,10 +13,10 @@
# under the License.
import copy
-import json
import fixtures
import jsonschema
+from oslo_serialization import jsonutils as json
import six
from tempest.lib.common import http
diff --git a/tempest/tests/lib/services/compute/test_images_client.py b/tempest/tests/lib/services/compute/test_images_client.py
index c2c3b76..d1500e5 100644
--- a/tempest/tests/lib/services/compute/test_images_client.py
+++ b/tempest/tests/lib/services/compute/test_images_client.py
@@ -186,15 +186,19 @@
def _test_resource_deleted(self, bytes_body=False):
params = {"id": self.FAKE_IMAGE_ID}
expected_op = self.FAKE_IMAGE_DATA['show']
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- side_effect=lib_exc.NotFound))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ side_effect=lib_exc.NotFound))
self.assertEqual(True, self.client.is_resource_deleted(**params))
tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
tempdata['image']['id'] = None
- self.useFixture(fixtures.MockPatch('tempest.lib.services.compute'
- '.images_client.ImagesClient.show_image',
- return_value=expected_op))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'tempest.lib.services.compute'
+ '.images_client.ImagesClient.show_image',
+ return_value=expected_op))
self.assertEqual(False, self.client.is_resource_deleted(**params))
def test_list_images_with_str_body(self):
diff --git a/tempest/tests/lib/services/identity/v2/test_token_client.py b/tempest/tests/lib/services/identity/v2/test_token_client.py
index dfce9b3..a592ada 100644
--- a/tempest/tests/lib/services/identity/v2/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v2/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index 38e8c4a..a9c58df 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -12,9 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
+from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/placement/__init__.py b/tempest/tests/lib/services/placement/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/lib/services/placement/__init__.py
diff --git a/tempest/tests/lib/services/placement/test_placement_client.py b/tempest/tests/lib/services/placement/test_placement_client.py
new file mode 100644
index 0000000..1396a85
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_placement_client.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement import placement_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestPlacementClient(base.BaseServiceTest):
+ FAKE_ALLOCATION_CANDIDATES = {
+ 'allocation_requests': [
+ {'allocations': {
+ 'rp-uuid': {'resources': {'VCPU': 42}}
+ }}
+ ],
+ 'provider_summaries': {
+ 'rp-uuid': {
+ 'resources': {
+ 'VCPU': {'used': 0, 'capacity': 64},
+ 'MEMORY_MB': {'capacity': 11196, 'used': 0},
+ 'DISK_GB': {'capacity': 19, 'used': 0}
+ },
+ 'traits': ["HW_CPU_X86_SVM"],
+ }
+ }
+ }
+
+ FAKE_ALLOCATIONS = {
+ 'allocations': {
+ 'rp-uuid-1': {
+ 'resources': {
+ 'NET_BW_IGR_KILOBIT_PER_SEC': 1
+ },
+ 'generation': 14
+ },
+ 'rp-uuid2': {
+ 'resources': {
+ 'MEMORY_MB': 256,
+ 'VCPU': 1
+ },
+ 'generation': 9
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestPlacementClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = placement_client.PlacementClient(
+ fake_auth, 'placement', 'regionOne')
+
+ def _test_list_allocation_candidates(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocation_candidates,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATION_CANDIDATES,
+ to_utf=bytes_body,
+ **{'resources1': 'NET_BW_IGR_KILOBIT_PER_SEC:1'})
+
+ def test_list_allocation_candidates_with_str_body(self):
+ self._test_list_allocation_candidates()
+
+ def test_list_allocation_candidates_with_bytes_body(self):
+ self._test_list_allocation_candidates(bytes_body=True)
+
+ def _test_list_allocations(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocations,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATIONS,
+ to_utf=bytes_body,
+ **{'consumer_uuid': 'foo-bar'})
+
+ def test_list_allocations_with_str_body(self):
+ self._test_list_allocations()
+
+ def test_list_allocations_with_bytes_body(self):
+ self._test_list_allocations(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
index e0f5566..84c7589 100644
--- a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
@@ -62,7 +62,7 @@
resp_body = self.FAKE_POOLS_LIST
else:
resp_body = {'pools': [{'name': pool['name']}
- for pool in self.FAKE_POOLS_LIST['pools']]}
+ for pool in self.FAKE_POOLS_LIST['pools']]}
self.check_service_client_function(
self.client.list_pools,
'tempest.lib.common.rest_client.RestClient.get',
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index 0b1a599..3e6160e 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -32,9 +32,17 @@
# By our decorators.attr decorator the attribute __testtools_attrs
# will be set only for 'type' argument, so we test it first.
if 'type' in decorator_args:
- # this is what testtools sets
- self.assertEqual(getattr(foo, '__testtools_attrs'),
- set(expected_attrs))
+ if 'condition' in decorator_args:
+ if decorator_args['condition']:
+ # The expected attrs should be in the function.
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
+ else:
+ # The expected attrs should not be in the function.
+ self.assertNotIn('__testtools_attrs', dir(foo))
+ else:
+ self.assertEqual(set(expected_attrs),
+ getattr(foo, '__testtools_attrs'))
def test_attr_without_type(self):
self._test_attr_helper(expected_attrs='baz', bar='baz')
@@ -50,6 +58,13 @@
def test_attr_decorator_with_duplicated_type(self):
self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
+ def test_attr_decorator_condition_false(self):
+ self._test_attr_helper(None, type='slow', condition=False)
+
+ def test_attr_decorator_condition_true(self):
+ self._test_attr_helper(expected_attrs=['slow'], type='slow',
+ condition=True)
+
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 9534ce8..83c1abb 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -48,6 +48,7 @@
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
+
def test_no_setup_teardown_class_for_tests(self):
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
diff --git a/tools/format.sh b/tools/format.sh
new file mode 100755
index 0000000..adffb8c
--- /dev/null
+++ b/tools/format.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+cd "$(dirname "$(readlink -f "$0")")"
+
+autopep8 --exit-code --max-line-length=79 --experimental --in-place -r ../tempest ../setup.py && echo Formatting was not needed. >&2
+
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 661329b..16e7b8c 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -18,20 +18,17 @@
# This script is intended to check the sanity of tempest plugins against
# tempest master.
# What it does:
-# * Creates the virtualenv
-# * Install tempest
# * Retrieve the project lists having tempest plugin if project name is
# given.
-# * For each project in a list, It does:
+# * For each project in a list, it does:
+# * Create virtualenv and install tempest in it
# * Clone the Project
# * Install the Project and also installs dependencies from
# test-requirements.txt.
# * Create Tempest workspace
# * List tempest plugins
# * List tempest plugins tests
-# * Uninstall the project and its dependencies
-# * Again Install tempest
-# * Again repeat the step from cloning project
+# * Delete virtualenv and project repo
#
# If one of the steps fails, the script will exit with failure.
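
For orientation, the per-project flow this rewrite converges on can be summarized in a short sketch (the function names are the ones defined further down in this script; the surrounding loop is illustrative only and omits the blacklist filtering and pass/fail bookkeeping done below):

    for project in $PROJECT_LIST; do
        prepare_workspace && \
        clone_project "$project" && \
        install_project "$project" && \
        tempest_sanity "$project"
    done
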
@@ -46,8 +43,39 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
-# List of projects having tempest plugin stale or unmaintained from long time
-BLACKLIST="networking-plumgrid,trio2o"
+# List of projects whose tempest plugins have been stale or unmaintained
+# for a long time (6 months or more)
+# TODO(masayukig): Some of these can be removed from BLACKLIST in the future.
+# airship-tempest-plugin: https://review.openstack.org/#/c/634387/
+# barbican-tempest-plugin: https://review.openstack.org/#/c/634631/
+# intel-nfv-ci-tests: https://review.openstack.org/#/c/634640/
+# networking-ansible: https://review.openstack.org/#/c/634647/
+# networking-generic-switch: https://review.openstack.org/#/c/634846/
+# networking-l2gw-tempest-plugin: https://review.openstack.org/#/c/635093/
+# networking-midonet: https://review.openstack.org/#/c/635096/
+# networking-plumgrid: https://review.openstack.org/#/c/635096/
+# networking-spp: https://review.openstack.org/#/c/635098/
+# neutron-dynamic-routing: https://review.openstack.org/#/c/637718/
+# neutron-vpnaas: https://review.openstack.org/#/c/637719/
+# nova-lxd: https://review.openstack.org/#/c/638334/
+# valet: https://review.openstack.org/#/c/638339/
+# vitrage-tempest-plugin: https://review.openstack.org/#/c/639003/
+BLACKLIST="
+airship-tempest-plugin
+barbican-tempest-plugin
+intel-nfv-ci-tests
+networking-ansible
+networking-generic-switch
+networking-l2gw-tempest-plugin
+networking-midonet
+networking-plumgrid
+networking-spp
+neutron-dynamic-routing
+neutron-vpnaas
+nova-lxd
+valet
+vitrage-tempest-plugin
+"
# Function to clone project using zuul-cloner or from git
function clone_project() {
@@ -63,14 +91,16 @@
fi
}
-# Create virtualenv to perform sanity operation
-SANITY_DIR=$(pwd)
-virtualenv "$SANITY_DIR"/.venv
-export TVENV="$SANITY_DIR/tools/with_venv.sh"
-cd "$SANITY_DIR"
+# Function to create a virtualenv to perform the sanity operation
+function prepare_workspace() {
+ SANITY_DIR=$(pwd)
+ virtualenv --clear "$SANITY_DIR"/.venv
+ export TVENV="$SANITY_DIR/tools/with_venv.sh"
+ cd "$SANITY_DIR"
-# Install tempest in a venv
-"$TVENV" pip install .
+ # Install tempest with test dependencies in a venv
+ "$TVENV" pip install -e . -r test-requirements.txt
+}
# Function to install project
function install_project() {
@@ -83,30 +113,31 @@
# Function to perform sanity checking on Tempest plugin
function tempest_sanity() {
- "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity
- cd "$SANITY_DIR"/tempest_sanity
- "$TVENV" tempest list-plugins
+ "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity && \
+ cd "$SANITY_DIR"/tempest_sanity && \
+ "$TVENV" tempest list-plugins && \
"$TVENV" tempest run -l
+ retval=$?
# Delete tempest workspace
+ # NOTE: Cleaning should be done even if an error occurs.
"$TVENV" tempest workspace remove --name tempest_sanity --rmdir
cd "$SANITY_DIR"
-}
-
-# Function to uninstall project
-function uninstall_project() {
- "$TVENV" pip uninstall -y "$SANITY_DIR"/openstack/"$1"
- # Check for *requirements.txt file in a project then uninstall it.
- if [ -e "$SANITY_DIR"/openstack/"$1"/*requirements.txt ]; then
- "$TVENV" pip uninstall -y -r "$SANITY_DIR"/openstack/"$1"/*requirements.txt
- fi
+ # Remove the sanity workspace in case it is still present
+ rm -fr "$SANITY_DIR"/tempest_sanity
# Remove the project directory after sanity run
rm -fr "$SANITY_DIR"/openstack/"$1"
+
+ return $retval
}
# Function to run sanity check on each project
function plugin_sanity_check() {
- clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
- && uninstall_project "$1" && "$TVENV" pip install .
+ prepare_workspace && \
+ clone_project "$1" && \
+ install_project "$1" && \
+ tempest_sanity "$1"
+
+ return $?
}
# Log status
@@ -117,11 +148,12 @@
# Remove blacklisted tempest plugins
if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
plugin_sanity_check $project && passed_plugin+=", $project" || \
- failed_plugin+=", $project"
+ failed_plugin+="$project, " > $SANITY_DIR/$project.txt
fi
done
# Check for failed status
if [[ -n $failed_plugin ]]; then
+ echo "Failed Plugins: $failed_plugin"
exit 1
fi
diff --git a/tox.ini b/tox.ini
index 4068054..433f168 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = pep8,py36,py27,pip-check-reqs
+envlist = pep8,py36,py37,py27,pip-check-reqs
minversion = 2.3.1
skipsdist = True
@@ -197,11 +197,21 @@
whitelist_externals = rm
[testenv:pep8]
+deps =
+ -r test-requirements.txt
+ autopep8
basepython = python3
commands =
+ autopep8 --exit-code --max-line-length=79 --experimental --diff -r tempest setup.py
flake8 {posargs}
check-uuid
+[testenv:autopep8]
+deps = autopep8
+basepython = python3
+commands =
+ autopep8 --max-line-length=79 --experimental --in-place -r tempest setup.py
+
[testenv:uuidgen]
commands =
check-uuid --fix
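
With the tools/format.sh script and the tox.ini changes above, the new formatting workflow can be exercised locally along these lines (a sketch assuming tox and autopep8 are installed; the autopep8 invocation is the same one the pep8 environment now runs):

    # Reformat the tree in place via the new tox environment
    tox -e autopep8

    # Check only: exits non-zero and prints a diff if any file would change
    autopep8 --exit-code --max-line-length=79 --experimental --diff -r tempest setup.py
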