Merge "Removes unnecessary utf-8 encoding"
diff --git a/.gitignore b/.gitignore
index 9292dbb..287db4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
ChangeLog
*.pyc
__pycache__/
+etc/accounts.yaml
etc/tempest.conf
etc/tempest.conf.sample
etc/logging.conf
diff --git a/releasenotes/notes/add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml b/releasenotes/notes/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
similarity index 100%
rename from releasenotes/notes/add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
rename to releasenotes/notes/14.0.0-add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
diff --git a/releasenotes/notes/add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml b/releasenotes/notes/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
similarity index 100%
rename from releasenotes/notes/add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
rename to releasenotes/notes/14.0.0-add-cred_client-to-tempest.lib-4d4af33f969c576f.yaml
diff --git a/releasenotes/notes/add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml b/releasenotes/notes/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
similarity index 100%
rename from releasenotes/notes/add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
rename to releasenotes/notes/14.0.0-add-error-code-translation-to-versions-clients-acbc78292e24b014.yaml
diff --git a/releasenotes/notes/add-image-clients-af94564fb34ddca6.yaml b/releasenotes/notes/14.0.0-add-image-clients-af94564fb34ddca6.yaml
similarity index 100%
rename from releasenotes/notes/add-image-clients-af94564fb34ddca6.yaml
rename to releasenotes/notes/14.0.0-add-image-clients-af94564fb34ddca6.yaml
diff --git a/releasenotes/notes/add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
similarity index 100%
rename from releasenotes/notes/add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
rename to releasenotes/notes/14.0.0-add-role-assignments-client-as-a-library-d34b4fdf376984ad.yaml
diff --git a/releasenotes/notes/add-service-provider-client-cbba77d424a30dd3.yaml b/releasenotes/notes/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml
similarity index 100%
rename from releasenotes/notes/add-service-provider-client-cbba77d424a30dd3.yaml
rename to releasenotes/notes/14.0.0-add-service-provider-client-cbba77d424a30dd3.yaml
diff --git a/releasenotes/notes/add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml b/releasenotes/notes/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
similarity index 100%
rename from releasenotes/notes/add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
rename to releasenotes/notes/14.0.0-add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
diff --git a/releasenotes/notes/deprecate-nova-api-extensions-df16b02485dae203.yaml b/releasenotes/notes/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml
similarity index 100%
rename from releasenotes/notes/deprecate-nova-api-extensions-df16b02485dae203.yaml
rename to releasenotes/notes/14.0.0-deprecate-nova-api-extensions-df16b02485dae203.yaml
diff --git a/releasenotes/notes/move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml b/releasenotes/notes/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
similarity index 100%
rename from releasenotes/notes/move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
rename to releasenotes/notes/14.0.0-move-cinder-v3-to-lib-service-be3ba0c20753b594.yaml
diff --git a/releasenotes/notes/new-volume-limit-client-517c17d9090f4df4.yaml b/releasenotes/notes/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml
similarity index 100%
rename from releasenotes/notes/new-volume-limit-client-517c17d9090f4df4.yaml
rename to releasenotes/notes/14.0.0-new-volume-limit-client-517c17d9090f4df4.yaml
diff --git a/releasenotes/notes/remo-stress-tests-81052b211ad95d2e.yaml b/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
similarity index 100%
rename from releasenotes/notes/remo-stress-tests-81052b211ad95d2e.yaml
rename to releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
diff --git a/releasenotes/notes/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml b/releasenotes/notes/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml
new file mode 100644
index 0000000..ca2635e
--- /dev/null
+++ b/releasenotes/notes/14.0.0-remove-baremetal-tests-65186d9e15d5b8fb.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+  - All tests for the Ironic project have been removed from Tempest. They
+    now exist as a Tempest plugin in the Ironic project.
diff --git a/releasenotes/notes/remove-bootable-option-024f8944c056a3e0.yaml b/releasenotes/notes/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml
similarity index 100%
rename from releasenotes/notes/remove-bootable-option-024f8944c056a3e0.yaml
rename to releasenotes/notes/14.0.0-remove-bootable-option-024f8944c056a3e0.yaml
diff --git a/releasenotes/notes/remove-negative-test-generator-1653f4c0f86ccf75.yaml b/releasenotes/notes/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml
similarity index 100%
rename from releasenotes/notes/remove-negative-test-generator-1653f4c0f86ccf75.yaml
rename to releasenotes/notes/14.0.0-remove-negative-test-generator-1653f4c0f86ccf75.yaml
diff --git a/releasenotes/notes/remove-sahara-tests-1532c47c7df80e3a.yaml b/releasenotes/notes/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml
similarity index 100%
rename from releasenotes/notes/remove-sahara-tests-1532c47c7df80e3a.yaml
rename to releasenotes/notes/14.0.0-remove-sahara-tests-1532c47c7df80e3a.yaml
diff --git a/releasenotes/notes/13.1.0-volume-clients-as-library-309030c7a16e62ab.yaml b/releasenotes/notes/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml
similarity index 100%
rename from releasenotes/notes/13.1.0-volume-clients-as-library-309030c7a16e62ab.yaml
rename to releasenotes/notes/14.0.0-volume-clients-as-library-309030c7a16e62ab.yaml
diff --git a/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml b/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
new file mode 100644
index 0000000..1af1939
--- /dev/null
+++ b/releasenotes/notes/add-identity-v3-clients-as-a-library-d34b4fdf376984ad.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - Define the identity v3 service client domains_client as a library.
+    Add domains_client to the library interface so that other
+    projects can use this module as a stable library without any
+    maintenance changes.
diff --git a/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml b/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml
new file mode 100644
index 0000000..9d1a003
--- /dev/null
+++ b/releasenotes/notes/add-image-clients-tests-49dbc0a0a4281a77.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+    As described in the doc:
+    http://developer.openstack.org/api-ref/image/v2/metadefs-index.html,
+    some of the metadefs APIs are not yet covered; add clients for them.
+
+ * namespace_objects_client(v2)
+
diff --git a/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml b/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
new file mode 100644
index 0000000..9a4e6b1
--- /dev/null
+++ b/releasenotes/notes/add-snapshot-manage-client-as-library-a76ffdba9d8d01cb.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+    Define the v2 snapshot_manage_client for the volume service as a
+    library interface, allowing other projects to use this module as
+    a stable library without maintenance changes.
+
+ * snapshot_manage_client(v2)
diff --git a/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml b/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
new file mode 100644
index 0000000..c0a06d1
--- /dev/null
+++ b/releasenotes/notes/deprecate-identity-feature-enabled.reseller-84800a8232fe217f.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - The default value for the ``reseller`` option in the
+ ``identity-feature-enabled`` section has been changed from ``False``
+ to ``True``.
+deprecations:
+ - The ``reseller`` option in the ``identity-feature-enabled`` section is now
+ deprecated.
diff --git a/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml b/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
new file mode 100644
index 0000000..c80f159
--- /dev/null
+++ b/releasenotes/notes/deprecate-volume_feature_enabled.volume_services-dbe024ea067d5ab2.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - The default value for the ``volume_services`` option in the
+    ``volume-feature-enabled`` section has been changed from ``False``
+ to ``True``.
+deprecations:
+  - The ``volume_services`` option in the ``volume-feature-enabled`` section
+ is now deprecated.
diff --git a/tempest/api/baremetal/README.rst b/tempest/api/baremetal/README.rst
deleted file mode 100644
index 759c937..0000000
--- a/tempest/api/baremetal/README.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-Tempest Field Guide to Baremetal API tests
-==========================================
-
-
-What are these tests?
----------------------
-
-These tests stress the OpenStack baremetal provisioning API provided by
-Ironic.
-
-
-Why are these tests in tempest?
-------------------------------
-
-The purpose of these tests is to exercise the various APIs provided by Ironic
-for managing baremetal nodes.
-
-
-Scope of these tests
---------------------
-
-The baremetal API test perform basic CRUD operations on the Ironic node
-inventory. They do not actually perform hardware provisioning. It is important
-to note that all Ironic API actions are admin operations meant to be used
-either by cloud operators or other OpenStack services (i.e., Nova).
diff --git a/tempest/api/baremetal/__init__.py b/tempest/api/baremetal/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api/baremetal/__init__.py
+++ /dev/null
diff --git a/tempest/api/baremetal/admin/__init__.py b/tempest/api/baremetal/admin/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api/baremetal/admin/__init__.py
+++ /dev/null
diff --git a/tempest/api/baremetal/admin/base.py b/tempest/api/baremetal/admin/base.py
deleted file mode 100644
index ac5986c..0000000
--- a/tempest/api/baremetal/admin/base.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-
-from tempest.common.utils import data_utils
-from tempest import config
-from tempest.lib import exceptions as lib_exc
-from tempest import test
-
-CONF = config.CONF
-
-
-# NOTE(adam_g): The baremetal API tests exercise operations such as enroll
-# node, power on, power off, etc. Testing against real drivers (ie, IPMI)
-# will require passing driver-specific data to Tempest (addresses,
-# credentials, etc). Until then, only support testing against the fake driver,
-# which has no external dependencies.
-SUPPORTED_DRIVERS = ['fake']
-
-# NOTE(jroll): resources must be deleted in a specific order, this list
-# defines the resource types to clean up, and the correct order.
-RESOURCE_TYPES = ['port', 'node', 'chassis']
-
-
-def creates(resource):
- """Decorator that adds resources to the appropriate cleanup list."""
-
- def decorator(f):
- @functools.wraps(f)
- def wrapper(cls, *args, **kwargs):
- resp, body = f(cls, *args, **kwargs)
-
- if 'uuid' in body:
- cls.created_objects[resource].add(body['uuid'])
-
- return resp, body
- return wrapper
- return decorator
-
-
-class BaseBaremetalTest(test.BaseTestCase):
- """Base class for Baremetal API tests."""
-
- credentials = ['admin']
-
- @classmethod
- def skip_checks(cls):
- super(BaseBaremetalTest, cls).skip_checks()
- if not CONF.service_available.ironic:
- skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
- raise cls.skipException(skip_msg)
-
- if CONF.baremetal.driver not in SUPPORTED_DRIVERS:
- skip_msg = ('%s skipped as Ironic driver %s is not supported for '
- 'testing.' %
- (cls.__name__, CONF.baremetal.driver))
- raise cls.skipException(skip_msg)
-
- @classmethod
- def setup_clients(cls):
- super(BaseBaremetalTest, cls).setup_clients()
- cls.client = cls.os_admin.baremetal_client
-
- @classmethod
- def resource_setup(cls):
- super(BaseBaremetalTest, cls).resource_setup()
-
- cls.driver = CONF.baremetal.driver
- cls.power_timeout = CONF.baremetal.power_timeout
- cls.created_objects = {}
- for resource in RESOURCE_TYPES:
- cls.created_objects[resource] = set()
-
- @classmethod
- def resource_cleanup(cls):
- """Ensure that all created objects get destroyed."""
-
- try:
- for resource in RESOURCE_TYPES:
- uuids = cls.created_objects[resource]
- delete_method = getattr(cls.client, 'delete_%s' % resource)
- for u in uuids:
- delete_method(u, ignore_errors=lib_exc.NotFound)
- finally:
- super(BaseBaremetalTest, cls).resource_cleanup()
-
- @classmethod
- @creates('chassis')
- def create_chassis(cls, description=None):
- """Wrapper utility for creating test chassis.
-
- :param description: A description of the chassis. If not supplied,
- a random value will be generated.
- :return: Created chassis.
-
- """
- description = description or data_utils.rand_name('test-chassis')
- resp, body = cls.client.create_chassis(description=description)
- return resp, body
-
- @classmethod
- @creates('node')
- def create_node(cls, chassis_id, cpu_arch='x86', cpus=8, local_gb=10,
- memory_mb=4096):
- """Wrapper utility for creating test baremetal nodes.
-
- :param chassis_id: The unique identifier of the chassis.
- :param cpu_arch: CPU architecture of the node. Default: x86.
- :param cpus: Number of CPUs. Default: 8.
- :param local_gb: Disk size. Default: 10.
- :param memory_mb: Available RAM. Default: 4096.
- :return: Created node.
-
- """
- resp, body = cls.client.create_node(chassis_id, cpu_arch=cpu_arch,
- cpus=cpus, local_gb=local_gb,
- memory_mb=memory_mb,
- driver=cls.driver)
-
- return resp, body
-
- @classmethod
- @creates('port')
- def create_port(cls, node_id, address, extra=None, uuid=None):
- """Wrapper utility for creating test ports.
-
- :param node_id: The unique identifier of the node.
- :param address: MAC address of the port.
- :param extra: Meta data of the port. If not supplied, an empty
- dictionary will be created.
- :param uuid: UUID of the port.
- :return: Created port.
-
- """
- extra = extra or {}
- resp, body = cls.client.create_port(address=address, node_id=node_id,
- extra=extra, uuid=uuid)
-
- return resp, body
-
- @classmethod
- def delete_chassis(cls, chassis_id):
- """Deletes a chassis having the specified UUID.
-
- :param chassis_id: The unique identifier of the chassis.
- :return: Server response.
-
- """
-
- resp, body = cls.client.delete_chassis(chassis_id)
-
- if chassis_id in cls.created_objects['chassis']:
- cls.created_objects['chassis'].remove(chassis_id)
-
- return resp
-
- @classmethod
- def delete_node(cls, node_id):
- """Deletes a node having the specified UUID.
-
- :param node_id: The unique identifier of the node.
- :return: Server response.
-
- """
-
- resp, body = cls.client.delete_node(node_id)
-
- if node_id in cls.created_objects['node']:
- cls.created_objects['node'].remove(node_id)
-
- return resp
-
- @classmethod
- def delete_port(cls, port_id):
- """Deletes a port having the specified UUID.
-
- :param port_id: The unique identifier of the port.
- :return: Server response.
-
- """
-
- resp, body = cls.client.delete_port(port_id)
-
- if port_id in cls.created_objects['port']:
- cls.created_objects['port'].remove(port_id)
-
- return resp
-
- def validate_self_link(self, resource, uuid, link):
- """Check whether the given self link formatted correctly."""
- expected_link = "{base}/{pref}/{res}/{uuid}".format(
- base=self.client.base_url,
- pref=self.client.uri_prefix,
- res=resource,
- uuid=uuid)
- self.assertEqual(expected_link, link)
diff --git a/tempest/api/baremetal/admin/test_api_discovery.py b/tempest/api/baremetal/admin/test_api_discovery.py
deleted file mode 100644
index 41388ad..0000000
--- a/tempest/api/baremetal/admin/test_api_discovery.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.baremetal.admin import base
-from tempest import test
-
-
-class TestApiDiscovery(base.BaseBaremetalTest):
- """Tests for API discovery features."""
-
- @test.idempotent_id('a3c27e94-f56c-42c4-8600-d6790650b9c5')
- def test_api_versions(self):
- _, descr = self.client.get_api_description()
- expected_versions = ('v1',)
- versions = [version['id'] for version in descr['versions']]
-
- for v in expected_versions:
- self.assertIn(v, versions)
-
- @test.idempotent_id('896283a6-488e-4f31-af78-6614286cbe0d')
- def test_default_version(self):
- _, descr = self.client.get_api_description()
- default_version = descr['default_version']
- self.assertEqual(default_version['id'], 'v1')
-
- @test.idempotent_id('abc0b34d-e684-4546-9728-ab7a9ad9f174')
- def test_version_1_resources(self):
- _, descr = self.client.get_version_description(version='v1')
- expected_resources = ('nodes', 'chassis',
- 'ports', 'links', 'media_types')
-
- for res in expected_resources:
- self.assertIn(res, descr)
diff --git a/tempest/api/baremetal/admin/test_chassis.py b/tempest/api/baremetal/admin/test_chassis.py
deleted file mode 100644
index 339aaea..0000000
--- a/tempest/api/baremetal/admin/test_chassis.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from tempest.api.baremetal.admin import base
-from tempest.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-from tempest import test
-
-
-class TestChassis(base.BaseBaremetalTest):
- """Tests for chassis."""
-
- @classmethod
- def resource_setup(cls):
- super(TestChassis, cls).resource_setup()
- _, cls.chassis = cls.create_chassis()
-
- def _assertExpected(self, expected, actual):
- # Check if not expected keys/values exists in actual response body
- for key, value in six.iteritems(expected):
- if key not in ('created_at', 'updated_at'):
- self.assertIn(key, actual)
- self.assertEqual(value, actual[key])
-
- @test.idempotent_id('7c5a2e09-699c-44be-89ed-2bc189992d42')
- def test_create_chassis(self):
- descr = data_utils.rand_name('test-chassis')
- _, chassis = self.create_chassis(description=descr)
- self.assertEqual(chassis['description'], descr)
-
- @test.idempotent_id('cabe9c6f-dc16-41a7-b6b9-0a90c212edd5')
- def test_create_chassis_unicode_description(self):
- # Use a unicode string for testing:
- # 'We ♡ OpenStack in Ukraine'
- descr = u'В Україні ♡ OpenStack!'
- _, chassis = self.create_chassis(description=descr)
- self.assertEqual(chassis['description'], descr)
-
- @test.idempotent_id('c84644df-31c4-49db-a307-8942881f41c0')
- def test_show_chassis(self):
- _, chassis = self.client.show_chassis(self.chassis['uuid'])
- self._assertExpected(self.chassis, chassis)
-
- @test.idempotent_id('29c9cd3f-19b5-417b-9864-99512c3b33b3')
- def test_list_chassis(self):
- _, body = self.client.list_chassis()
- self.assertIn(self.chassis['uuid'],
- [i['uuid'] for i in body['chassis']])
-
- @test.idempotent_id('5ae649ad-22d1-4fe1-bbc6-97227d199fb3')
- def test_delete_chassis(self):
- _, body = self.create_chassis()
- uuid = body['uuid']
-
- self.delete_chassis(uuid)
- self.assertRaises(lib_exc.NotFound, self.client.show_chassis, uuid)
-
- @test.idempotent_id('cda8a41f-6be2-4cbf-840c-994b00a89b44')
- def test_update_chassis(self):
- _, body = self.create_chassis()
- uuid = body['uuid']
-
- new_description = data_utils.rand_name('new-description')
- _, body = (self.client.update_chassis(uuid,
- description=new_description))
- _, chassis = self.client.show_chassis(uuid)
- self.assertEqual(chassis['description'], new_description)
-
- @test.idempotent_id('76305e22-a4e2-4ab3-855c-f4e2368b9335')
- def test_chassis_node_list(self):
- _, node = self.create_node(self.chassis['uuid'])
- _, body = self.client.list_chassis_nodes(self.chassis['uuid'])
- self.assertIn(node['uuid'], [n['uuid'] for n in body['nodes']])
diff --git a/tempest/api/baremetal/admin/test_drivers.py b/tempest/api/baremetal/admin/test_drivers.py
deleted file mode 100644
index f08d7ab..0000000
--- a/tempest/api/baremetal/admin/test_drivers.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.baremetal.admin import base
-from tempest import config
-from tempest import test
-
-CONF = config.CONF
-
-
-class TestDrivers(base.BaseBaremetalTest):
- """Tests for drivers."""
- @classmethod
- def resource_setup(cls):
- super(TestDrivers, cls).resource_setup()
- cls.driver_name = CONF.baremetal.driver
-
- @test.idempotent_id('5aed2790-7592-4655-9b16-99abcc2e6ec5')
- def test_list_drivers(self):
- _, drivers = self.client.list_drivers()
- self.assertIn(self.driver_name,
- [d['name'] for d in drivers['drivers']])
-
- @test.idempotent_id('fb3287a3-c4d7-44bf-ae9d-1eef906d78ce')
- def test_show_driver(self):
- _, driver = self.client.show_driver(self.driver_name)
- self.assertEqual(self.driver_name, driver['name'])
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
deleted file mode 100644
index 2c44665..0000000
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from tempest.api.baremetal.admin import base
-from tempest.common.utils import data_utils
-from tempest.common import waiters
-from tempest.lib import exceptions as lib_exc
-from tempest import test
-
-
-class TestNodes(base.BaseBaremetalTest):
- """Tests for baremetal nodes."""
-
- def setUp(self):
- super(TestNodes, self).setUp()
-
- _, self.chassis = self.create_chassis()
- _, self.node = self.create_node(self.chassis['uuid'])
-
- def _assertExpected(self, expected, actual):
- # Check if not expected keys/values exists in actual response body
- for key, value in six.iteritems(expected):
- if key not in ('created_at', 'updated_at'):
- self.assertIn(key, actual)
- self.assertEqual(value, actual[key])
-
- def _associate_node_with_instance(self):
- self.client.set_node_power_state(self.node['uuid'], 'power off')
- waiters.wait_for_bm_node_status(self.client, self.node['uuid'],
- 'power_state', 'power off')
- instance_uuid = data_utils.rand_uuid()
- self.client.update_node(self.node['uuid'],
- instance_uuid=instance_uuid)
- self.addCleanup(self.client.update_node,
- uuid=self.node['uuid'], instance_uuid=None)
- return instance_uuid
-
- @test.idempotent_id('4e939eb2-8a69-4e84-8652-6fffcbc9db8f')
- def test_create_node(self):
- params = {'cpu_arch': 'x86_64',
- 'cpus': '12',
- 'local_gb': '10',
- 'memory_mb': '1024'}
-
- _, body = self.create_node(self.chassis['uuid'], **params)
- self._assertExpected(params, body['properties'])
-
- @test.idempotent_id('9ade60a4-505e-4259-9ec4-71352cbbaf47')
- def test_delete_node(self):
- _, node = self.create_node(self.chassis['uuid'])
-
- self.delete_node(node['uuid'])
-
- self.assertRaises(lib_exc.NotFound, self.client.show_node,
- node['uuid'])
-
- @test.idempotent_id('55451300-057c-4ecf-8255-ba42a83d3a03')
- def test_show_node(self):
- _, loaded_node = self.client.show_node(self.node['uuid'])
- self._assertExpected(self.node, loaded_node)
-
- @test.idempotent_id('4ca123c4-160d-4d8d-a3f7-15feda812263')
- def test_list_nodes(self):
- _, body = self.client.list_nodes()
- self.assertIn(self.node['uuid'],
- [i['uuid'] for i in body['nodes']])
-
- @test.idempotent_id('85b1f6e0-57fd-424c-aeff-c3422920556f')
- def test_list_nodes_association(self):
- _, body = self.client.list_nodes(associated=True)
- self.assertNotIn(self.node['uuid'],
- [n['uuid'] for n in body['nodes']])
-
- self._associate_node_with_instance()
-
- _, body = self.client.list_nodes(associated=True)
- self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
-
- _, body = self.client.list_nodes(associated=False)
- self.assertNotIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
-
- @test.idempotent_id('18c4ebd8-f83a-4df7-9653-9fb33a329730')
- def test_node_port_list(self):
- _, port = self.create_port(self.node['uuid'],
- data_utils.rand_mac_address())
- _, body = self.client.list_node_ports(self.node['uuid'])
- self.assertIn(port['uuid'],
- [p['uuid'] for p in body['ports']])
-
- @test.idempotent_id('72591acb-f215-49db-8395-710d14eb86ab')
- def test_node_port_list_no_ports(self):
- _, node = self.create_node(self.chassis['uuid'])
- _, body = self.client.list_node_ports(node['uuid'])
- self.assertEmpty(body['ports'])
-
- @test.idempotent_id('4fed270a-677a-4d19-be87-fd38ae490320')
- def test_update_node(self):
- props = {'cpu_arch': 'x86_64',
- 'cpus': '12',
- 'local_gb': '10',
- 'memory_mb': '128'}
-
- _, node = self.create_node(self.chassis['uuid'], **props)
-
- new_p = {'cpu_arch': 'x86',
- 'cpus': '1',
- 'local_gb': '10000',
- 'memory_mb': '12300'}
-
- _, body = self.client.update_node(node['uuid'], properties=new_p)
- _, node = self.client.show_node(node['uuid'])
- self._assertExpected(new_p, node['properties'])
-
- @test.idempotent_id('cbf1f515-5f4b-4e49-945c-86bcaccfeb1d')
- def test_validate_driver_interface(self):
- _, body = self.client.validate_driver_interface(self.node['uuid'])
- core_interfaces = ['power', 'deploy']
- for interface in core_interfaces:
- self.assertIn(interface, body)
-
- @test.idempotent_id('5519371c-26a2-46e9-aa1a-f74226e9d71f')
- def test_set_node_boot_device(self):
- self.client.set_node_boot_device(self.node['uuid'], 'pxe')
-
- @test.idempotent_id('9ea73775-f578-40b9-bc34-efc639c4f21f')
- def test_get_node_boot_device(self):
- body = self.client.get_node_boot_device(self.node['uuid'])
- self.assertIn('boot_device', body)
- self.assertIn('persistent', body)
- self.assertIsInstance(body['boot_device'], six.string_types)
- self.assertIsInstance(body['persistent'], bool)
-
- @test.idempotent_id('3622bc6f-3589-4bc2-89f3-50419c66b133')
- def test_get_node_supported_boot_devices(self):
- body = self.client.get_node_supported_boot_devices(self.node['uuid'])
- self.assertIn('supported_boot_devices', body)
- self.assertIsInstance(body['supported_boot_devices'], list)
-
- @test.idempotent_id('f63b6288-1137-4426-8cfe-0d5b7eb87c06')
- def test_get_console(self):
- _, body = self.client.get_console(self.node['uuid'])
- con_info = ['console_enabled', 'console_info']
- for key in con_info:
- self.assertIn(key, body)
-
- @test.idempotent_id('80504575-9b21-4670-92d1-143b948f9437')
- def test_set_console_mode(self):
- self.client.set_console_mode(self.node['uuid'], True)
-
- _, body = self.client.get_console(self.node['uuid'])
- self.assertEqual(True, body['console_enabled'])
-
- @test.idempotent_id('b02a4f38-5e8b-44b2-aed2-a69a36ecfd69')
- def test_get_node_by_instance_uuid(self):
- instance_uuid = self._associate_node_with_instance()
- _, body = self.client.show_node_by_instance_uuid(instance_uuid)
- self.assertEqual(len(body['nodes']), 1)
- self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
diff --git a/tempest/api/baremetal/admin/test_nodestates.py b/tempest/api/baremetal/admin/test_nodestates.py
deleted file mode 100644
index e74dd04..0000000
--- a/tempest/api/baremetal/admin/test_nodestates.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_utils import timeutils
-
-from tempest.api.baremetal.admin import base
-from tempest.lib import exceptions
-from tempest import test
-
-
-class TestNodeStates(base.BaseBaremetalTest):
- """Tests for baremetal NodeStates."""
-
- @classmethod
- def resource_setup(cls):
- super(TestNodeStates, cls).resource_setup()
- _, cls.chassis = cls.create_chassis()
- _, cls.node = cls.create_node(cls.chassis['uuid'])
-
- def _validate_power_state(self, node_uuid, power_state):
- # Validate that power state is set within timeout
- if power_state == 'rebooting':
- power_state = 'power on'
- start = timeutils.utcnow()
- while timeutils.delta_seconds(
- start, timeutils.utcnow()) < self.power_timeout:
- _, node = self.client.show_node(node_uuid)
- if node['power_state'] == power_state:
- return
- message = ('Failed to set power state within '
- 'the required time: %s sec.' % self.power_timeout)
- raise exceptions.TimeoutException(message)
-
- @test.idempotent_id('cd8afa5e-3f57-4e43-8185-beb83d3c9015')
- def test_list_nodestates(self):
- _, nodestates = self.client.list_nodestates(self.node['uuid'])
- for key in nodestates:
- self.assertEqual(nodestates[key], self.node[key])
-
- @test.idempotent_id('fc5b9320-0c98-4e5a-8848-877fe5a0322c')
- def test_set_node_power_state(self):
- _, node = self.create_node(self.chassis['uuid'])
- states = ["power on", "rebooting", "power off"]
- for state in states:
- # Set power state
- self.client.set_node_power_state(node['uuid'], state)
- # Check power state after state is set
- self._validate_power_state(node['uuid'], state)
diff --git a/tempest/api/baremetal/admin/test_ports.py b/tempest/api/baremetal/admin/test_ports.py
deleted file mode 100644
index ce519c1..0000000
--- a/tempest/api/baremetal/admin/test_ports.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from tempest.api.baremetal.admin import base
-from tempest.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-from tempest import test
-
-
-class TestPorts(base.BaseBaremetalTest):
- """Tests for ports."""
-
- def setUp(self):
- super(TestPorts, self).setUp()
-
- _, self.chassis = self.create_chassis()
- _, self.node = self.create_node(self.chassis['uuid'])
- _, self.port = self.create_port(self.node['uuid'],
- data_utils.rand_mac_address())
-
- def _assertExpected(self, expected, actual):
- # Check if not expected keys/values exists in actual response body
- for key, value in six.iteritems(expected):
- if key not in ('created_at', 'updated_at'):
- self.assertIn(key, actual)
- self.assertEqual(value, actual[key])
-
- @test.idempotent_id('83975898-2e50-42ed-b5f0-e510e36a0b56')
- def test_create_port(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
-
- _, body = self.client.show_port(port['uuid'])
-
- self._assertExpected(port, body)
-
- @test.idempotent_id('d1f6b249-4cf6-4fe6-9ed6-a6e84b1bf67b')
- def test_create_port_specifying_uuid(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- uuid = data_utils.rand_uuid()
-
- _, port = self.create_port(node_id=node_id,
- address=address, uuid=uuid)
-
- _, body = self.client.show_port(uuid)
- self._assertExpected(port, body)
-
- @test.idempotent_id('4a02c4b0-6573-42a4-a513-2e36ad485b62')
- def test_create_port_with_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'str': 'value', 'int': 123, 'float': 0.123,
- 'bool': True, 'list': [1, 2, 3], 'dict': {'foo': 'bar'}}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
-
- _, body = self.client.show_port(port['uuid'])
- self._assertExpected(port, body)
-
- @test.idempotent_id('1bf257a9-aea3-494e-89c0-63f657ab4fdd')
- def test_delete_port(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- _, port = self.create_port(node_id=node_id, address=address)
-
- self.delete_port(port['uuid'])
-
- self.assertRaises(lib_exc.NotFound, self.client.show_port,
- port['uuid'])
-
- @test.idempotent_id('9fa77ab5-ce59-4f05-baac-148904ba1597')
- def test_show_port(self):
- _, port = self.client.show_port(self.port['uuid'])
- self._assertExpected(self.port, port)
-
- @test.idempotent_id('7c1114ff-fc3f-47bb-bc2f-68f61620ba8b')
- def test_show_port_by_address(self):
- _, port = self.client.show_port_by_address(self.port['address'])
- self._assertExpected(self.port, port['ports'][0])
-
- @test.idempotent_id('bd773405-aea5-465d-b576-0ab1780069e5')
- def test_show_port_with_links(self):
- _, port = self.client.show_port(self.port['uuid'])
- self.assertIn('links', port.keys())
- self.assertEqual(2, len(port['links']))
- self.assertIn(port['uuid'], port['links'][0]['href'])
-
- @test.idempotent_id('b5e91854-5cd7-4a8e-bb35-3e0a1314606d')
- def test_list_ports(self):
- _, body = self.client.list_ports()
- self.assertIn(self.port['uuid'],
- [i['uuid'] for i in body['ports']])
- # Verify self links.
- for port in body['ports']:
- self.validate_self_link('ports', port['uuid'],
- port['links'][0]['href'])
-
- @test.idempotent_id('324a910e-2f80-4258-9087-062b5ae06240')
- def test_list_with_limit(self):
- _, body = self.client.list_ports(limit=3)
-
- next_marker = body['ports'][-1]['uuid']
- self.assertIn(next_marker, body['next'])
-
- @test.idempotent_id('8a94b50f-9895-4a63-a574-7ecff86e5875')
- def test_list_ports_details(self):
- node_id = self.node['uuid']
-
- uuids = [
- self.create_port(node_id=node_id,
- address=data_utils.rand_mac_address())
- [1]['uuid'] for i in range(0, 5)]
-
- _, body = self.client.list_ports_detail()
-
- ports_dict = dict((port['uuid'], port) for port in body['ports']
- if port['uuid'] in uuids)
-
- for uuid in uuids:
- self.assertIn(uuid, ports_dict)
- port = ports_dict[uuid]
- self.assertIn('extra', port)
- self.assertIn('node_uuid', port)
- # never expose the node_id
- self.assertNotIn('node_id', port)
- # Verify self link.
- self.validate_self_link('ports', port['uuid'],
- port['links'][0]['href'])
-
- @test.idempotent_id('8a03f688-7d75-4ecd-8cbc-e06b8f346738')
- def test_list_ports_details_with_address(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- self.create_port(node_id=node_id, address=address)
- for i in range(0, 5):
- self.create_port(node_id=node_id,
- address=data_utils.rand_mac_address())
-
- _, body = self.client.list_ports_detail(address=address)
- self.assertEqual(1, len(body['ports']))
- self.assertEqual(address, body['ports'][0]['address'])
-
- @test.idempotent_id('9c26298b-1bcb-47b7-9b9e-8bdd6e3c4aba')
- def test_update_port_replace(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
-
- new_address = data_utils.rand_mac_address()
- new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
- 'key3': 'new-value3'}
-
- patch = [{'path': '/address',
- 'op': 'replace',
- 'value': new_address},
- {'path': '/extra/key1',
- 'op': 'replace',
- 'value': new_extra['key1']},
- {'path': '/extra/key2',
- 'op': 'replace',
- 'value': new_extra['key2']},
- {'path': '/extra/key3',
- 'op': 'replace',
- 'value': new_extra['key3']}]
-
- self.client.update_port(port['uuid'], patch)
-
- _, body = self.client.show_port(port['uuid'])
- self.assertEqual(new_address, body['address'])
- self.assertEqual(new_extra, body['extra'])
-
- @test.idempotent_id('d7e7fece-6ed9-460a-9ebe-9267217e8580')
- def test_update_port_remove(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
-
- # Removing one item from the collection
- self.client.update_port(port['uuid'],
- [{'path': '/extra/key2',
- 'op': 'remove'}])
- extra.pop('key2')
- _, body = self.client.show_port(port['uuid'])
- self.assertEqual(extra, body['extra'])
-
- # Removing the collection
- self.client.update_port(port['uuid'], [{'path': '/extra',
- 'op': 'remove'}])
- _, body = self.client.show_port(port['uuid'])
- self.assertEqual({}, body['extra'])
-
- # Assert nothing else was changed
- self.assertEqual(node_id, body['node_uuid'])
- self.assertEqual(address, body['address'])
-
- @test.idempotent_id('241288b3-e98a-400f-a4d7-d1f716146361')
- def test_update_port_add(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
-
- extra = {'key1': 'value1', 'key2': 'value2'}
-
- patch = [{'path': '/extra/key1',
- 'op': 'add',
- 'value': extra['key1']},
- {'path': '/extra/key2',
- 'op': 'add',
- 'value': extra['key2']}]
-
- self.client.update_port(port['uuid'], patch)
-
- _, body = self.client.show_port(port['uuid'])
- self.assertEqual(extra, body['extra'])
-
- @test.idempotent_id('5309e897-0799-4649-a982-0179b04c3876')
- def test_update_port_mixed_ops(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key1': 'value1', 'key2': 'value2'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
-
- new_address = data_utils.rand_mac_address()
- new_extra = {'key1': 0.123, 'key3': {'cat': 'meow'}}
-
- patch = [{'path': '/address',
- 'op': 'replace',
- 'value': new_address},
- {'path': '/extra/key1',
- 'op': 'replace',
- 'value': new_extra['key1']},
- {'path': '/extra/key2',
- 'op': 'remove'},
- {'path': '/extra/key3',
- 'op': 'add',
- 'value': new_extra['key3']}]
-
- self.client.update_port(port['uuid'], patch)
-
- _, body = self.client.show_port(port['uuid'])
- self.assertEqual(new_address, body['address'])
- self.assertEqual(new_extra, body['extra'])
diff --git a/tempest/api/baremetal/admin/test_ports_negative.py b/tempest/api/baremetal/admin/test_ports_negative.py
deleted file mode 100644
index 5e3a33f..0000000
--- a/tempest/api/baremetal/admin/test_ports_negative.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.baremetal.admin import base
-from tempest.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-from tempest import test
-
-
-class TestPortsNegative(base.BaseBaremetalTest):
- """Negative tests for ports."""
-
- def setUp(self):
- super(TestPortsNegative, self).setUp()
-
- _, self.chassis = self.create_chassis()
- _, self.node = self.create_node(self.chassis['uuid'])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('0a6ee1f7-d0d9-4069-8778-37f3aa07303a')
- def test_create_port_malformed_mac(self):
- node_id = self.node['uuid']
- address = 'malformed:mac'
-
- self.assertRaises(lib_exc.BadRequest,
- self.create_port, node_id=node_id, address=address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('30277ee8-0c60-4f1d-b125-0e51c2f43369')
- def test_create_port_nonexsistent_node_id(self):
- node_id = data_utils.rand_uuid()
- address = data_utils.rand_mac_address()
- self.assertRaises(lib_exc.BadRequest, self.create_port,
- node_id=node_id, address=address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('029190f6-43e1-40a3-b64a-65173ba653a3')
- def test_show_port_malformed_uuid(self):
- self.assertRaises(lib_exc.BadRequest, self.client.show_port,
- 'malformed:uuid')
-
- @test.attr(type=['negative'])
- @test.idempotent_id('0d00e13d-e2e0-45b1-bcbc-55a6d90ca793')
- def test_show_port_nonexistent_uuid(self):
- self.assertRaises(lib_exc.NotFound, self.client.show_port,
- data_utils.rand_uuid())
-
- @test.attr(type=['negative'])
- @test.idempotent_id('4ad85266-31e9-4942-99ac-751897dc9e23')
- def test_show_port_by_mac_not_allowed(self):
- self.assertRaises(lib_exc.BadRequest, self.client.show_port,
- data_utils.rand_mac_address())
-
- @test.attr(type=['negative'])
- @test.idempotent_id('89a34380-3c61-4c32-955c-2cd9ce94da21')
- def test_create_port_duplicated_port_uuid(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- uuid = data_utils.rand_uuid()
-
- self.create_port(node_id=node_id, address=address, uuid=uuid)
- self.assertRaises(lib_exc.Conflict, self.create_port, node_id=node_id,
- address=address, uuid=uuid)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('65e84917-733c-40ae-ae4b-96a4adff931c')
- def test_create_port_no_mandatory_field_node_id(self):
- address = data_utils.rand_mac_address()
-
- self.assertRaises(lib_exc.BadRequest, self.create_port, node_id=None,
- address=address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('bcea3476-7033-4183-acfe-e56a30809b46')
- def test_create_port_no_mandatory_field_mac(self):
- node_id = self.node['uuid']
-
- self.assertRaises(lib_exc.BadRequest, self.create_port,
- node_id=node_id, address=None)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('2b51cd18-fb95-458b-9780-e6257787b649')
- def test_create_port_malformed_port_uuid(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- uuid = 'malformed:uuid'
-
- self.assertRaises(lib_exc.BadRequest, self.create_port,
- node_id=node_id, address=address, uuid=uuid)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('583a6856-6a30-4ac4-889f-14e2adff8105')
- def test_create_port_malformed_node_id(self):
- address = data_utils.rand_mac_address()
- self.assertRaises(lib_exc.BadRequest, self.create_port,
- node_id='malformed:nodeid', address=address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('e27f8b2e-42c6-4a43-a3cd-accff716bc5c')
- def test_create_port_duplicated_mac(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- self.create_port(node_id=node_id, address=address)
- self.assertRaises(lib_exc.Conflict,
- self.create_port, node_id=node_id,
- address=address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('8907082d-ac5e-4be3-b05f-d072ede82020')
- def test_update_port_by_mac_not_allowed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
-
- self.create_port(node_id=node_id, address=address, extra=extra)
-
- patch = [{'path': '/extra/key',
- 'op': 'replace',
- 'value': 'new-value'}]
-
- self.assertRaises(lib_exc.BadRequest,
- self.client.update_port, address,
- patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('df1ac70c-db9f-41d9-90f1-78cd6b905718')
- def test_update_port_nonexistent(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- port_id = port['uuid']
-
- _, body = self.client.delete_port(port_id)
-
- patch = [{'path': '/extra/key',
- 'op': 'replace',
- 'value': 'new-value'}]
- self.assertRaises(lib_exc.NotFound,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('c701e315-aa52-41ea-817c-65c5ca8ca2a8')
- def test_update_port_malformed_port_uuid(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- self.create_port(node_id=node_id, address=address)
-
- new_address = data_utils.rand_mac_address()
- self.assertRaises(lib_exc.BadRequest, self.client.update_port,
- uuid='malformed:uuid',
- patch=[{'path': '/address', 'op': 'replace',
- 'value': new_address}])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('f8f15803-34d6-45dc-b06f-e5e04bf1b38b')
- def test_update_port_add_nonexistent_property(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(lib_exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/nonexistent', ' op': 'add',
- 'value': 'value'}])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('898ec904-38b1-4fcb-9584-1187d4263a2a')
- def test_update_port_replace_node_id_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/node_uuid',
- 'op': 'replace',
- 'value': 'malformed:node_uuid'}]
- self.assertRaises(lib_exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('2949f30f-5f59-43fa-a6d9-4eac578afab4')
- def test_update_port_replace_mac_with_duplicated(self):
- node_id = self.node['uuid']
- address1 = data_utils.rand_mac_address()
- address2 = data_utils.rand_mac_address()
-
- _, port1 = self.create_port(node_id=node_id, address=address1)
-
- _, port2 = self.create_port(node_id=node_id, address=address2)
- port_id = port2['uuid']
-
- patch = [{'path': '/address',
- 'op': 'replace',
- 'value': address1}]
- self.assertRaises(lib_exc.Conflict,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('97f6e048-6e4f-4eba-a09d-fbbc78b77a77')
- def test_update_port_replace_node_id_with_nonexistent(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/node_uuid',
- 'op': 'replace',
- 'value': data_utils.rand_uuid()}]
- self.assertRaises(lib_exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('375022c5-9e9e-4b11-9ca4-656729c0c9b2')
- def test_update_port_replace_mac_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/address',
- 'op': 'replace',
- 'value': 'malformed:mac'}]
-
- self.assertRaises(lib_exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('5722b853-03fc-4854-8308-2036a1b67d85')
- def test_update_port_replace_nonexistent_property(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/nonexistent', ' op': 'replace', 'value': 'value'}]
-
- self.assertRaises(lib_exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('ae2696ca-930a-4a7f-918f-30ae97c60f56')
- def test_update_port_remove_mandatory_field_mac(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(lib_exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/address', 'op': 'remove'}])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('5392c1f0-2071-4697-9064-ec2d63019018')
- def test_update_port_remove_mandatory_field_port_uuid(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(lib_exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/uuid', 'op': 'remove'}])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('06b50d82-802a-47ef-b079-0a3311cf85a2')
- def test_update_port_remove_nonexistent_property(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(lib_exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/nonexistent', 'op': 'remove'}])
-
- @test.attr(type=['negative'])
- @test.idempotent_id('03d42391-2145-4a6c-95bf-63fe55eb64fd')
- def test_delete_port_by_mac_not_allowed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- self.create_port(node_id=node_id, address=address)
- self.assertRaises(lib_exc.BadRequest, self.client.delete_port, address)
-
- @test.attr(type=['negative'])
- @test.idempotent_id('0629e002-818e-4763-b25b-ae5e07b1cb23')
- def test_update_port_mixed_ops_integrity(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key1': 'value1', 'key2': 'value2'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- port_id = port['uuid']
-
- new_address = data_utils.rand_mac_address()
- new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
-
- patch = [{'path': '/address',
- 'op': 'replace',
- 'value': new_address},
- {'path': '/extra/key1',
- 'op': 'replace',
- 'value': new_extra['key1']},
- {'path': '/extra/key2',
- 'op': 'remove'},
- {'path': '/extra/key3',
- 'op': 'add',
- 'value': new_extra['key3']},
- {'path': '/nonexistent',
- 'op': 'replace',
- 'value': 'value'}]
-
- self.assertRaises(lib_exc.BadRequest, self.client.update_port, port_id,
- patch)
-
- # patch should not be applied
- _, body = self.client.show_port(port_id)
- self.assertEqual(address, body['address'])
- self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 40cb523..1c37cad 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -12,14 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log
-
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
-LOG = log.getLogger(__name__)
-
class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Agents API"""
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index 8e481fd..08e4072 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -17,7 +17,6 @@
from tempest.api.compute import base
from tempest.common import compute
from tempest.common import credentials_factory as credentials
-from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_excs
@@ -152,9 +151,7 @@
# create the server with no networking
server, _ = compute.create_test_server(
self.os, networks='none', wait_until='ACTIVE')
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# get the server ips
addresses = self.servers_client.list_addresses(
server['id'])['addresses']
@@ -182,9 +179,7 @@
min_count=3)
server_nets = set()
for server in servers:
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
# get the server ips
addresses = self.servers_client.list_addresses(
server['id'])['addresses']
diff --git a/tempest/api/compute/admin/test_baremetal_nodes.py b/tempest/api/compute/admin/test_baremetal_nodes.py
index b764483..722d9a6 100644
--- a/tempest/api/compute/admin/test_baremetal_nodes.py
+++ b/tempest/api/compute/admin/test_baremetal_nodes.py
@@ -25,7 +25,7 @@
@classmethod
def resource_setup(cls):
super(BaremetalNodesAdminTestJSON, cls).resource_setup()
- if not CONF.service_available.ironic:
+ if not getattr(CONF.service_available, 'ironic', False):
skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
raise cls.skipException(skip_msg)
cls.client = cls.os_adm.baremetal_nodes_client
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 72d5b18..ff84945 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -72,10 +72,9 @@
block_migration = (CONF.compute_feature_enabled.
block_migration_for_live_migration and
not volume_backed)
- body = self.admin_servers_client.live_migrate_server(
+ self.admin_servers_client.live_migrate_server(
server_id, host=dest_host, block_migration=block_migration,
**kwargs)
- return body
def _get_host_other_than(self, host):
for target_host in self._get_compute_hostnames():
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index ce0adb4..33b9bef 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -14,7 +14,6 @@
# under the License.
from oslo_log import log as logging
-import six
from testtools import matchers
from tempest.api.compute import base
@@ -175,7 +174,7 @@
# restore the defaults when the test is done
self.addCleanup(self._restore_default_quotas, body.copy())
# increment all of the values for updating the default quota class
- for quota, default in six.iteritems(body):
+ for quota, default in body.items():
# NOTE(sdague): we need to increment a lot, otherwise
# there is a real chance that we go from -1 (unlimited)
# to a very small number which causes issues.
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 23b16e7..206260f 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -72,8 +72,8 @@
raise self.skipException("ram quota set is -1,"
" cannot test overlimit")
ram += 1
- vcpus = 8
- disk = 10
+ vcpus = 1
+ disk = 5
flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
ram=ram, vcpus=vcpus,
disk=disk,
@@ -93,7 +93,6 @@
self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor")
flavor_id = self._get_unused_flavor_id()
- ram = 512
quota_set = self.quotas_client.show_quota_set(
self.tenant_id)['quota_set']
vcpus = int(quota_set['cores'])
@@ -101,7 +100,8 @@
raise self.skipException("cores quota set is -1,"
" cannot test overlimit")
vcpus += 1
- disk = 10
+ ram = 512
+ disk = 5
flavor_ref = self.flavors_client.create_flavor(name=flavor_name,
ram=ram, vcpus=vcpus,
disk=disk,
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index b9dac6f..26b8742 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -49,6 +49,7 @@
self.server['id'], nonexistent_volume,
volumeId=volume['id'])
+ @test.related_bug('1629110', status_code=400)
@test.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
def test_update_attached_volume_with_nonexistent_volume_in_body(self):
volume = self.create_volume()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index d7e01f0..d77ea90 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -139,15 +139,15 @@
test_utils.call_and_ignore_notfound_exc(
cls.servers_client.delete_server, server['id'])
except Exception:
- LOG.exception('Deleting server %s failed' % server['id'])
+ LOG.exception('Deleting server %s failed', server['id'])
for server in cls.servers:
try:
waiters.wait_for_server_termination(cls.servers_client,
server['id'])
except Exception:
- LOG.exception('Waiting for deletion of server %s failed'
- % server['id'])
+ LOG.exception('Waiting for deletion of server %s failed',
+ server['id'])
@classmethod
def server_check_teardown(cls):
@@ -179,7 +179,7 @@
test_utils.call_and_ignore_notfound_exc(
cls.compute_images_client.delete_image, image_id)
except Exception:
- LOG.exception('Exception raised deleting image %s' % image_id)
+ LOG.exception('Exception raised deleting image %s', image_id)
@classmethod
def clear_security_groups(cls):
@@ -283,7 +283,7 @@
volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
LOG.warning("Unable to delete volume '%s' since it was not found. "
- "Maybe it was already deleted?" % volume_id)
+ "Maybe it was already deleted?", volume_id)
@classmethod
def prepare_instance_network(cls):
@@ -336,7 +336,7 @@
waiters.wait_for_server_termination(cls.servers_client,
server_id)
except Exception:
- LOG.exception('Failed to delete server %s' % server_id)
+ LOG.exception('Failed to delete server %s', server_id)
@classmethod
def resize_server(cls, server_id, new_flavor_id, **kwargs):
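
The LOG changes in this file (and in several files below) switch from %-interpolating the message eagerly to passing the arguments to the logging call. With oslo.log, as with the stdlib logging module, the message is then only formatted if the record is actually emitted. A small sketch of the two forms:

    import logging

    LOG = logging.getLogger(__name__)
    server_id = 'example-id'  # placeholder value for illustration only

    # Eager: the string is built even when this level is filtered out.
    LOG.debug('Failed to delete server %s' % server_id)

    # Lazy: interpolation is deferred to the logging framework and only
    # happens when the record is really emitted.
    LOG.debug('Failed to delete server %s', server_id)
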
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 19e2880..0b4a2a8 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
@@ -23,7 +21,6 @@
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 1c9b3f1..cd71de7 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -47,8 +47,8 @@
waiters.wait_for_server_status(self.servers_client, self.server_id,
'ACTIVE')
except Exception:
- LOG.exception('server %s timed out to become ACTIVE. rebuilding'
- % self.server_id)
+ LOG.exception('server %s timed out to become ACTIVE. rebuilding',
+ self.server_id)
# Rebuild server if cannot reach the ACTIVE state
# Usually it means the server had a serious accident
self._reset_server()
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 38c294b..60caa19 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -43,7 +43,6 @@
group = {}
ip_range = {}
cls.expected = {
- 'id': None,
'parent_group_id': None,
'ip_protocol': cls.ip_protocol,
'from_port': from_port,
@@ -54,8 +53,6 @@
def _check_expected_response(self, actual_rule):
for key in self.expected:
- if key == 'id':
- continue
self.assertEqual(self.expected[key], actual_rule[key],
"Miss-matched key is %s" % key)
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index a21ce94..1731bf3 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -268,9 +268,7 @@
self.os, tenant_network=network, wait_until='ACTIVE', min_count=2)
# add our cleanups for the servers since we bypassed the base class
for server in servers:
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
for server in servers:
# attach the port to the server
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index d2e31ad..2dcacb7 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -253,24 +253,18 @@
self.flavor_ref)['flavor']
def create_flavor_with_ephemeral(ephem_disk):
- flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
+ flavor_id = data_utils.rand_int_id(start=1000)
+ name = 'flavor_with_ephemeral_%s' % ephem_disk
+ flavor_name = data_utils.rand_name(name)
ram = flavor_base['ram']
vcpus = flavor_base['vcpus']
disk = flavor_base['disk']
- if ephem_disk > 0:
- # Create a flavor with ephemeral disk
- flavor_name = data_utils.rand_name('eph_flavor')
- flavor = self.flavor_client.create_flavor(
- name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
- id=flavor_with_eph_disk_id, ephemeral=ephem_disk)['flavor']
- else:
- # Create a flavor without ephemeral disk
- flavor_name = data_utils.rand_name('no_eph_flavor')
- flavor = self.flavor_client.create_flavor(
- name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
- id=flavor_with_eph_disk_id)['flavor']
+ # Create a flavor with ephemeral disk
+ flavor = self.flavor_client.create_flavor(
+ name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
+ id=flavor_id, ephemeral=ephem_disk)['flavor']
self.addCleanup(flavor_clean_up, flavor['id'])
return flavor['id']
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index b2d5ae7..1d502be 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -19,7 +19,6 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
-from tempest.common import waiters
from tempest import config
from tempest.lib import exceptions
from tempest import test
@@ -196,9 +195,7 @@
}
])
- self.addCleanup(waiters.wait_for_server_termination,
- self.servers_client, server['id'])
- self.addCleanup(self.servers_client.delete_server, server['id'])
+ self.addCleanup(self.delete_server, server['id'])
self.ssh_client = remote_client.RemoteClient(
self.get_server_ip(server),
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 0a94d5e..0334eff 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -350,7 +350,7 @@
else:
LOG.warning("Deletion of oldest backup %s should not have "
"been successful as it should have been "
- "deleted during rotation." % oldest_backup)
+ "deleted during rotation.", oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index d31b6f8..549ba03 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from tempest.api.compute import base
from tempest import test
@@ -50,7 +48,7 @@
# We do not know the exact network configuration, but an instance
# should at least have a single public or private address
self.assertGreaterEqual(len(addresses), 1)
- for network_name, network_addresses in six.iteritems(addresses):
+ for network_name, network_addresses in addresses.items():
self.assertGreaterEqual(len(network_addresses), 1)
for address in network_addresses:
self.assertTrue(address['addr'])
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 1b1b339..f66bc72 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -217,6 +217,26 @@
name=server_name)
@test.attr(type=['negative'])
+ @test.related_bug('1651064', status_code=500)
+ @test.idempotent_id('12146ac1-d7df-4928-ad25-b1f99e5286cd')
+ def test_create_server_invalid_bdm_in_2nd_dict(self):
+ volume = self.create_volume()
+ bdm_1st = {"source_type": "image",
+ "delete_on_termination": True,
+ "boot_index": 0,
+ "uuid": self.image_ref,
+ "destination_type": "local"}
+ bdm_2nd = {"source_type": "volume",
+ "uuid": volume["id"],
+ "destination_type": "invalid"}
+ bdm = [bdm_1st, bdm_2nd]
+
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_test_server,
+ image_id=self.image_ref,
+ block_device_mapping_v2=bdm)
+
+ @test.attr(type=['negative'])
@test.idempotent_id('4e72dc2d-44c5-4336-9667-f7972e95c402')
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 6e57aff..d171cd5 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -42,7 +42,7 @@
raise self.skipException('There are not any extensions configured')
# Log extensions list
extension_list = map(lambda x: x['alias'], extensions)
- LOG.debug("Nova extensions: %s" % ','.join(extension_list))
+ LOG.debug("Nova extensions: %s", ','.join(extension_list))
@test.idempotent_id('05762f39-bdfa-4cdb-9b46-b78f8e78e2fd')
@test.requires_ext(extension='os-consoles', service='compute')
diff --git a/tempest/api/compute/test_live_block_migration_negative.py b/tempest/api/compute/test_live_block_migration_negative.py
index f072b81..7853962 100644
--- a/tempest/api/compute/test_live_block_migration_negative.py
+++ b/tempest/api/compute/test_live_block_migration_negative.py
@@ -37,10 +37,9 @@
def _migrate_server_to(self, server_id, dest_host):
bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
- body = self.admin_servers_client.live_migrate_server(
+ self.admin_servers_client.live_migrate_server(
server_id, host=dest_host, block_migration=bmflm,
disk_over_commit=False)
- return body
@test.attr(type=['negative'])
@test.idempotent_id('7fb7856e-ae92-44c9-861a-af62d7830bcb')
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 1edadef..30549ec 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -68,8 +68,8 @@
volume_id, 'available')
except lib_exc.NotFound:
LOG.warning("Unable to detach volume %s from server %s "
- "possibly it was already detached" % (volume_id,
- server_id))
+ "possibly it was already detached", volume_id,
+ server_id)
def _attach_volume(self, server_id, volume_id, device=None):
# Attach the volume to the server
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index e4e625b..82cc653 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -13,14 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class VolumesTestJSON(base.BaseV2ComputeTest):
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index fd2683e..3ec4ff1 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -15,11 +15,17 @@
import time
+import testtools
+
from tempest.api.identity import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import test
+CONF = config.CONF
+
+
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
@test.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
@@ -152,3 +158,30 @@
user = self.setup_test_user()
fetched_user = self.users_client.show_user(user['id'])['user']
self.assertEqual(user['id'], fetched_user['id'])
+
+ @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+ 'Security compliance not available.')
+ @test.idempotent_id('568cd46c-ee6c-4ab4-a33a-d3791931979e')
+ def test_password_history_not_enforced_in_admin_reset(self):
+ old_password = self.os.credentials.password
+ user_id = self.os.credentials.user_id
+
+ new_password = data_utils.rand_password()
+ self.users_client.update_user(user_id, password=new_password)
+ # To be safe, we add this cleanup to restore the original password in
+ # case something goes wrong before it is restored later.
+ self.addCleanup(
+ self.users_client.update_user, user_id, password=old_password)
+
+ # Check authorization with new password
+ self.token.auth(user_id=user_id, password=new_password)
+
+ if CONF.identity.user_unique_last_password_count > 1:
+ # The password history is not enforced via the admin reset route.
+ # We can set the same password.
+ self.users_client.update_user(user_id, password=new_password)
+
+ # Restore original password
+ self.users_client.update_user(user_id, password=old_password)
+ # Check authorization with old password
+ self.token.auth(user_id=user_id, password=old_password)
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 33d212c..bafb1f2 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -16,11 +16,15 @@
import time
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
+CONF = config.CONF
+
+
class IdentityUsersTest(base.BaseIdentityV2Test):
@classmethod
@@ -31,36 +35,10 @@
cls.password = cls.creds.password
cls.tenant_name = cls.creds.tenant_name
- @test.idempotent_id('165859c9-277f-4124-9479-a7d1627b0ca7')
- def test_user_update_own_password(self):
-
- def _restore_password(client, user_id, old_pass, new_pass):
- # Reset auth to get a new token with the new password
- client.auth_provider.clear_auth()
- client.auth_provider.credentials.password = new_pass
- client.update_user_own_password(user_id, password=old_pass,
- original_password=new_pass)
- # Reset auth again to verify the password restore does work.
- # Clear auth restores the original credentials and deletes
- # cached auth data
- client.auth_provider.clear_auth()
- # NOTE(lbragstad): Fernet tokens are not subsecond aware and
- # Keystone should only be precise to the second. Sleep to ensure we
- # are passing the second boundary before attempting to
- # authenticate.
- time.sleep(1)
- client.auth_provider.set_auth()
-
- old_pass = self.creds.password
- new_pass = data_utils.rand_password()
- user_id = self.creds.user_id
- # to change password back. important for allow_tenant_isolation = false
- self.addCleanup(_restore_password, self.non_admin_users_client,
- user_id, old_pass=old_pass, new_pass=new_pass)
-
- # user updates own password
+ def _update_password(self, user_id, original_password, password):
self.non_admin_users_client.update_user_own_password(
- user_id, password=new_pass, original_password=old_pass)
+ user_id, password=password, original_password=original_password)
+
# NOTE(morganfainberg): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure
# we are passing the second boundary.
@@ -68,13 +46,55 @@
# check authorization with new password
self.non_admin_token_client.auth(self.username,
- new_pass,
+ password,
self.tenant_name)
+ # Reset auth to get a new token with the new password
+ self.non_admin_users_client.auth_provider.clear_auth()
+ self.non_admin_users_client.auth_provider.credentials.password = (
+ password)
+
+ def _restore_password(self, user_id, old_pass, new_pass):
+ if CONF.identity_feature_enabled.security_compliance:
+ # First we need to clear the password history
+ unique_count = CONF.identity.user_unique_last_password_count
+ for i in range(unique_count):
+ random_pass = data_utils.rand_password()
+ self._update_password(
+ user_id, original_password=new_pass, password=random_pass)
+ new_pass = random_pass
+
+ self._update_password(
+ user_id, original_password=new_pass, password=old_pass)
+ # Reset auth again to verify the password restore does work.
+ # Clear auth restores the original credentials and deletes
+ # cached auth data
+ self.non_admin_users_client.auth_provider.clear_auth()
+ # NOTE(lbragstad): Fernet tokens are not subsecond aware and
+ # Keystone should only be precise to the second. Sleep to ensure we
+ # are passing the second boundary before attempting to
+ # authenticate.
+ time.sleep(1)
+ self.non_admin_users_client.auth_provider.set_auth()
+
+ @test.idempotent_id('165859c9-277f-4124-9479-a7d1627b0ca7')
+ def test_user_update_own_password(self):
+ old_pass = self.creds.password
+ old_token = self.non_admin_users_client.token
+ new_pass = data_utils.rand_password()
+ user_id = self.creds.user_id
+
+ # To change password back. Important for allow_tenant_isolation = false
+ self.addCleanup(self._restore_password, user_id, old_pass, new_pass)
+
+ # user updates own password
+ self._update_password(
+ user_id, original_password=old_pass, password=new_pass)
+
# authorize with old token should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token_client.auth_token,
- self.non_admin_users_client.token)
+ old_token)
# authorize with old password should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 1a38f3a..f389a8f 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -15,12 +15,18 @@
import time
+import testtools
+
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import test
+CONF = config.CONF
+
+
class IdentityV3UsersTest(base.BaseIdentityV3Test):
@classmethod
@@ -31,36 +37,11 @@
cls.username = cls.creds.username
cls.password = cls.creds.password
- @test.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
- def test_user_update_own_password(self):
-
- def _restore_password(client, user_id, old_pass, new_pass):
- # Reset auth to get a new token with the new password
- client.auth_provider.clear_auth()
- client.auth_provider.credentials.password = new_pass
- client.update_user_password(user_id, password=old_pass,
- original_password=new_pass)
- # Reset auth again to verify the password restore does work.
- # Clear auth restores the original credentials and deletes
- # cached auth data
- client.auth_provider.clear_auth()
- # NOTE(lbragstad): Fernet tokens are not subsecond aware and
- # Keystone should only be precise to the second. Sleep to ensure we
- # are passing the second boundary before attempting to
- # authenticate.
- time.sleep(1)
- client.auth_provider.set_auth()
-
- old_pass = self.creds.password
- new_pass = data_utils.rand_password()
- user_id = self.creds.user_id
- # to change password back. important for allow_tenant_isolation = false
- self.addCleanup(_restore_password, self.non_admin_users_client,
- user_id, old_pass=old_pass, new_pass=new_pass)
-
- # user updates own password
+ def _update_password(self, original_password, password):
self.non_admin_users_client.update_user_password(
- user_id, password=new_pass, original_password=old_pass)
+ self.user_id,
+ password=password,
+ original_password=original_password)
# NOTE(morganfainberg): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure
@@ -68,15 +49,112 @@
time.sleep(1)
# check authorization with new password
- self.non_admin_token.auth(user_id=self.user_id, password=new_pass)
+ self.non_admin_token.auth(user_id=self.user_id, password=password)
+
+ # Reset auth to get a new token with the new password
+ self.non_admin_users_client.auth_provider.clear_auth()
+ self.non_admin_users_client.auth_provider.credentials.password = (
+ password)
+
+ def _restore_password(self, old_pass, new_pass):
+ if CONF.identity_feature_enabled.security_compliance:
+ # First we need to clear the password history
+ unique_count = CONF.identity.user_unique_last_password_count
+ for i in range(unique_count):
+ random_pass = data_utils.rand_password()
+ self._update_password(
+ original_password=new_pass, password=random_pass)
+ new_pass = random_pass
+
+ self._update_password(original_password=new_pass, password=old_pass)
+ # Reset auth again to verify the password restore does work.
+ # Clear auth restores the original credentials and deletes
+ # cached auth data
+ self.non_admin_users_client.auth_provider.clear_auth()
+ # NOTE(lbragstad): Fernet tokens are not subsecond aware and
+ # Keystone should only be precise to the second. Sleep to ensure we
+ # are passing the second boundary before attempting to
+ # authenticate.
+ time.sleep(1)
+ self.non_admin_users_client.auth_provider.set_auth()
+
+ @test.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
+ def test_user_update_own_password(self):
+ old_pass = self.creds.password
+ old_token = self.non_admin_client.token
+ new_pass = data_utils.rand_password()
+
+ # To change password back. Important for allow_tenant_isolation = false
+ self.addCleanup(self._restore_password, old_pass, new_pass)
+
+ # user updates own password
+ self._update_password(original_password=old_pass, password=new_pass)
# authorize with old token should lead to IdentityError (404 code)
self.assertRaises(exceptions.IdentityError,
self.non_admin_token.auth,
- token=self.non_admin_client.token)
+ token=old_token)
# authorize with old password should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token.auth,
user_id=self.user_id,
password=old_pass)
+
+ @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+ 'Security compliance not available.')
+ @test.idempotent_id('941784ee-5342-4571-959b-b80dd2cea516')
+ def test_password_history_check_self_service_api(self):
+ old_pass = self.creds.password
+ new_pass1 = data_utils.rand_password()
+ new_pass2 = data_utils.rand_password()
+
+ self.addCleanup(self._restore_password, old_pass, new_pass2)
+
+ # Update password
+ self._update_password(original_password=old_pass, password=new_pass1)
+
+ if CONF.identity.user_unique_last_password_count > 1:
+ # Can not reuse a previously set password
+ self.assertRaises(exceptions.BadRequest,
+ self.non_admin_users_client.update_user_password,
+ self.user_id,
+ password=new_pass1,
+ original_password=new_pass1)
+
+ self.assertRaises(exceptions.BadRequest,
+ self.non_admin_users_client.update_user_password,
+ self.user_id,
+ password=old_pass,
+ original_password=new_pass1)
+
+ # A different password can be set
+ self._update_password(original_password=new_pass1, password=new_pass2)
+
+ @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+ 'Security compliance not available.')
+ @test.idempotent_id('a7ad8bbf-2cff-4520-8c1d-96332e151658')
+ def test_user_account_lockout(self):
+ password = self.creds.password
+
+ # First, we login using the correct credentials
+ self.non_admin_token.auth(user_id=self.user_id, password=password)
+
+ # Lock user account by using the wrong password to login
+ bad_password = data_utils.rand_password()
+ for i in range(CONF.identity.user_lockout_failure_attempts):
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_token.auth,
+ user_id=self.user_id,
+ password=bad_password)
+
+ # The user account must be locked, so now it is not possible to login
+ # even using the correct password
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_token.auth,
+ user_id=self.user_id,
+ password=password)
+
+ # If we wait the required time, the user account will be unlocked
+ time.sleep(CONF.identity.user_lockout_duration + 1)
+ self.non_admin_token.auth(user_id=self.user_id, password=password)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 812c436..23bd628 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -142,6 +142,7 @@
cls.namespaces_client = cls.os.namespaces_client
cls.resource_types_client = cls.os.resource_types_client
cls.namespace_properties_client = cls.os.namespace_properties_client
+ cls.namespace_objects_client = cls.os.namespace_objects_client
cls.schemas_client = cls.os.schemas_client
def create_namespace(cls, namespace_name=None, visibility='public',
diff --git a/tempest/api/image/v1/test_image_members.py b/tempest/api/image/v1/test_image_members.py
index 50f0926..9c211ef 100644
--- a/tempest/api/image/v1/test_image_members.py
+++ b/tempest/api/image/v1/test_image_members.py
@@ -14,6 +14,7 @@
from tempest.api.image import base
+from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -54,3 +55,5 @@
body = self.image_member_client.list_image_members(image_id)
members = body['members']
self.assertEqual(0, len(members), str(members))
+ self.assertRaises(
+ lib_exc.NotFound, self.alt_img_cli.show_image, image_id)
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 9fbdcd7..0caaa67 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -36,7 +36,7 @@
msg = ("The container format and the disk format don't match. "
"Container format: %(container)s, Disk format: %(disk)s." %
{'container': container_format, 'disk': disk_format})
- raise exceptions.InvalidConfiguration(message=msg)
+ raise exceptions.InvalidConfiguration(msg)
return container_format, disk_format
@@ -54,7 +54,6 @@
disk_format=disk_format,
is_public=False,
properties=properties)
- self.assertIn('id', image)
self.assertEqual('New Name', image.get('name'))
self.assertFalse(image.get('is_public'))
self.assertEqual('queued', image.get('status'))
@@ -77,7 +76,6 @@
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
- self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
@@ -92,7 +90,6 @@
container_format=container_format,
disk_format=disk_format, is_public=False,
copy_from=CONF.image.http_image)
- self.assertIn('id', image)
self.assertEqual('New Http Image', image.get('name'))
self.assertFalse(image.get('is_public'))
waiters.wait_for_image_status(self.client, image['id'], 'active')
@@ -109,7 +106,6 @@
is_public=False,
min_ram=40,
properties=properties)
- self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
diff --git a/tempest/api/image/v1/test_images_negative.py b/tempest/api/image/v1/test_images_negative.py
index d8f103a..3493cc2 100644
--- a/tempest/api/image/v1/test_images_negative.py
+++ b/tempest/api/image/v1/test_images_negative.py
@@ -15,6 +15,7 @@
from tempest.api.image import base
+from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -44,7 +45,7 @@
def test_delete_non_existent_image(self):
# Return an error while trying to delete a non-existent image
- non_existent_image_id = '11a22b9-12a9-5555-cc11-00ab112223fa'
+ non_existent_image_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
non_existent_image_id)
@@ -58,9 +59,9 @@
@test.idempotent_id('950e5054-a3c7-4dee-ada5-e576f1087abd')
def test_delete_image_non_hex_string_id(self):
# Return an error while trying to delete an image with non hex id
- image_id = '11a22b9-120q-5555-cc11-00ab112223gj'
+ invalid_image_id = data_utils.rand_uuid()[:-1] + "j"
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
- image_id)
+ invalid_image_id)
@test.attr(type=['negative'])
@test.idempotent_id('4ed757cd-450c-44b1-9fd1-c819748c650d')
@@ -70,7 +71,8 @@
@test.attr(type=['negative'])
@test.idempotent_id('a4a448ab-3db2-4d2d-b9b2-6a1271241dfe')
- def test_delete_image_id_is_over_35_character_limit(self):
+ def test_delete_image_id_over_character_limit(self):
# Return an error while trying to delete image with id over limit
+ overlimit_image_id = data_utils.rand_uuid() + "1"
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
- '11a22b9-12a9-5555-cc11-00ab112223fa-3fac')
+ overlimit_image_id)
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 7b9244b..453bb34 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -49,7 +49,6 @@
disk_format=disk_format,
visibility='private',
ramdisk_id=uuid)
- self.assertIn('id', image)
self.assertIn('name', image)
self.assertEqual(image_name, image['name'])
self.assertIn('visibility', image)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py b/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
new file mode 100644
index 0000000..95d1521
--- /dev/null
+++ b/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
@@ -0,0 +1,73 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.image import base
+from tempest.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest import test
+
+
+class MetadataNamespaceObjectsTest(base.BaseV2ImageTest):
+ """Test the Metadata definition namespace objects basic functionality"""
+
+ def _create_namespace_object(self, namespace):
+ object_name = data_utils.rand_name(self.__class__.__name__ + '-object')
+ namespace_object = self.namespace_objects_client.\
+ create_namespace_object(namespace['namespace'], name=object_name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.namespace_objects_client.delete_namespace_object,
+ namespace['namespace'], object_name)
+ return namespace_object
+
+ @test.idempotent_id('b1a3775e-3b5c-4f6a-a3b4-1ba3574ae718')
+ def test_create_update_delete_meta_namespace_objects(self):
+ # Create a namespace
+ namespace = self.create_namespace()
+ # Create a namespace object
+ body = self._create_namespace_object(namespace)
+ # Update a namespace object
+ up_object_name = data_utils.rand_name('update-object')
+ body = self.namespace_objects_client.update_namespace_object(
+ namespace['namespace'], body['name'],
+ name=up_object_name)
+ self.assertEqual(up_object_name, body['name'])
+ # Delete a namespace object
+ self.namespace_objects_client.delete_namespace_object(
+ namespace['namespace'], up_object_name)
+ # List namespace objects and validate deletion
+ namespace_objects = [
+ namespace_object['name'] for namespace_object in
+ self.namespace_objects_client.list_namespace_objects(
+ namespace['namespace'])['objects']]
+ self.assertNotIn(up_object_name, namespace_objects)
+
+ @test.idempotent_id('a2a3615e-3b5c-3f6a-a2b1-1ba3574ae738')
+ def test_list_meta_namespace_objects(self):
+ # Create a namespace object
+ namespace = self.create_namespace()
+ meta_namespace_object = self._create_namespace_object(namespace)
+ # List namespace objects
+ namespace_objects = [
+ namespace_object['name'] for namespace_object in
+ self.namespace_objects_client.list_namespace_objects(
+ namespace['namespace'])['objects']]
+ self.assertIn(meta_namespace_object['name'], namespace_objects)
+
+ @test.idempotent_id('b1a3674e-3b4c-3f6a-a3b4-1ba3573ca768')
+ def test_show_meta_namespace_objects(self):
+ # Create a namespace object
+ namespace = self.create_namespace()
+ namespace_object = self._create_namespace_object(namespace)
+ # Show a namespace object
+ body = self.namespace_objects_client.show_namespace_object(
+ namespace['namespace'], namespace_object['name'])
+ self.assertEqual(namespace_object['name'], body['name'])
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index c256b5b..beb6ce6 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -57,8 +57,7 @@
# Try to create a third network while the quota is two
with self.assertRaisesRegex(
lib_exc.Conflict,
- "An object with that identifier already exists\\n" +
- "Details.*Quota exceeded for resources: \['network'\].*"):
+ "Quota exceeded for resources: \['network'\].*"):
n3 = self.networks_client.create_network()
self.addCleanup(self.networks_client.delete_network,
n3['network']['id'])
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index 978fb8f..8695ebd 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -57,7 +55,7 @@
project_id, **new_quotas)['quota']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_quotas_client.reset_quotas, project_id)
- for key, value in six.iteritems(new_quotas):
+ for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Confirm our project is listed among projects with non default quotas
@@ -71,7 +69,7 @@
# Confirm from API quotas were changed as requested for project
quota_set = self.admin_quotas_client.show_quotas(project_id)
quota_set = quota_set['quota']
- for key, value in six.iteritems(new_quotas):
+ for key, value in new_quotas.items():
self.assertEqual(value, quota_set[key])
# Reset quotas to default and confirm
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 84c48ec..3c96a93 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -16,8 +16,6 @@
import netaddr
import random
-import six
-
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest.common.utils import net_info
@@ -126,7 +124,7 @@
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
- kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v)
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
@@ -269,7 +267,7 @@
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
- kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v)
subnet = self.create_subnet(self.network, **kwargs)
port = self.create_port(self.network)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
@@ -291,7 +289,7 @@
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
- kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v)
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
@@ -340,7 +338,7 @@
{'subnet_id': subnet['id'],
'ip_address': ip}])
self.assertRaisesRegex(lib_exc.Conflict,
- "object with that identifier already exists",
+ "IpAddressAlreadyAllocated|IpAddressInUse",
self.create_port,
self.network,
fixed_ips=[{'subnet_id': subnet['id'],
@@ -364,7 +362,7 @@
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
- kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v)
subnet, port = self._create_subnet_router(kwargs)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index acac22b..8e2f3f6 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -130,7 +130,7 @@
**kwargs)
compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
mask_bits=mask_bits, **kwargs)
- compare_args = dict((k, v) for k, v in six.iteritems(compare_args_full)
+ compare_args = dict((k, v) for k, v in compare_args_full.items()
if v is not None)
if 'dns_nameservers' in set(subnet).intersection(compare_args):
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5b46088..e7153f0 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -108,7 +108,7 @@
if ((address.version == 4 and address.prefixlen >= 30) or
(address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
- raise exceptions.InvalidConfiguration(message=msg)
+ raise exceptions.InvalidConfiguration(msg)
allocation_pools = {'allocation_pools': [{'start': str(address[2]),
'end': str(address[-2])}]}
subnet = self.create_subnet(network, cidr=address,
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index f2170ad..101e4dd 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -14,7 +14,6 @@
# under the License.
import netaddr
-import six
from tempest.api.network import base_routers as base
from tempest.common.utils import data_utils
@@ -163,7 +162,7 @@
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
- for k, v in six.iteritems(exp_ext_gw_info):
+ for k, v in exp_ext_gw_info.items():
self.assertEqual(v, actual_ext_gw_info[k])
def _verify_gateway_port(self, router_id):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 1031ab8..be01852 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from tempest.api.network import base_security_groups as base
from tempest.common.utils import data_utils
from tempest import config
@@ -62,7 +60,7 @@
'port_range_max': port_range_max,
'remote_group_id': remote_group_id,
'remote_ip_prefix': remote_ip_prefix}
- for key, value in six.iteritems(expected):
+ for key, value in expected.items():
self.assertEqual(value, sec_group_rule[key],
"Field %s of the created security group "
"rule does not match with %s." %
@@ -131,7 +129,7 @@
rule_create_body['security_group_rule']['id']
)
create_dict = rule_create_body['security_group_rule']
- for key, value in six.iteritems(create_dict):
+ for key, value in create_dict.items():
self.assertEqual(value,
show_rule_body['security_group_rule'][key],
"%s does not match." % key)
diff --git a/tempest/api/network/test_service_providers.py b/tempest/api/network/test_service_providers.py
index 09fb5fe..be17b3e 100644
--- a/tempest/api/network/test_service_providers.py
+++ b/tempest/api/network/test_service_providers.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.network import base
from tempest import test
@@ -17,6 +19,9 @@
class ServiceProvidersTest(base.BaseNetworkTest):
@test.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
+ @testtools.skipUnless(
+ test.is_extension_enabled('service-type', 'network'),
+ 'service-type extension not enabled.')
def test_service_providers_list(self):
body = self.service_providers_client.list_service_providers()
self.assertIn('service_providers', body)
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index eb313d2..535137e 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -25,6 +25,38 @@
CONF = config.CONF
+def delete_containers(containers, container_client, object_client):
+ """Remove containers and all objects in them.
+
+ The containers should be visible from the container_client given.
+ Will not throw any error if the containers don't exist.
+ Will not check that object and container deletions succeed.
+ After deleting all the objects from a container, it waits 2 seconds
+ before deleting the container itself so that deployments using an HA
+ proxy have time to sync the deletion; otherwise the container might
+ fail to be deleted because it is not empty.
+
+ :param containers: List of containers to be deleted
+ :param container_client: Client to be used to delete containers
+ :param object_client: Client to be used to delete objects
+ """
+ for cont in containers:
+ try:
+ params = {'limit': 9999, 'format': 'json'}
+ resp, objlist = container_client.list_container_contents(
+ cont, params)
+ # delete every object in the container
+ for obj in objlist:
+ test_utils.call_and_ignore_notfound_exc(
+ object_client.delete_object, cont, obj['name'])
+ # sleep 2 seconds to sync the deletion of the objects
+ # in HA deployment
+ time.sleep(2)
+ container_client.delete_container(cont)
+ except lib_exc.NotFound:
+ pass
+
+
class BaseObjectTest(tempest.test.BaseTestCase):
credentials = [['operator', CONF.object_storage.operator_role]]
@@ -98,42 +130,12 @@
return object_name, data
@classmethod
- def delete_containers(cls, container_client=None,
- object_client=None):
- """Remove containers and all objects in them.
-
- The containers should be visible from the container_client given.
- Will not throw any error if the containers don't exist.
- Will not check that object and container deletions succeed.
- After delete all the objects from a container, it will wait 2
- seconds before delete the container itself, in order to deployments
- using HA proxy sync the deletion properly, otherwise, the container
- might fail to be deleted because it's not empty.
-
- :param container_client: if None, use cls.container_client, this means
- that the default testing user will be used (see 'username' in
- 'etc/tempest.conf')
- :param object_client: if None, use cls.object_client
- """
+ def delete_containers(cls, container_client=None, object_client=None):
if container_client is None:
container_client = cls.container_client
if object_client is None:
object_client = cls.object_client
- for cont in cls.containers:
- try:
- params = {'limit': 9999, 'format': 'json'}
- resp, objlist = container_client.list_container_contents(
- cont, params)
- # delete every object in the container
- for obj in objlist:
- test_utils.call_and_ignore_notfound_exc(
- object_client.delete_object, cont, obj['name'])
- # sleep 2 seconds to sync the deletion of the objects
- # in HA deployment
- time.sleep(2)
- container_client.delete_container(cont)
- except lib_exc.NotFound:
- pass
+ delete_containers(cls.containers, container_client, object_client)
def assertHeaders(self, resp, target, method):
"""Check the existence and the format of response headers"""
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index a75ed98..1eda49a 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -27,7 +27,10 @@
self.containers = []
def tearDown(self):
- self.delete_containers()
+ # NOTE(andreaf) BulkTest needs to clean up its containers after each
+ # test is executed.
+ base.delete_containers(self.containers, self.container_client,
+ self.object_client)
super(BulkTest, self).tearDown()
def _create_archive(self):
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index f63c518..2856fab 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -173,5 +173,5 @@
ex = self.assertRaises(exceptions.Conflict,
self.container_client.delete_container,
container_name)
- self.assertIn('An object with that identifier already exists',
- str(ex))
+ self.assertIn('There was a conflict when trying to complete your '
+ 'request.', str(ex))
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 8736f9a..e2e9919 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -19,8 +19,6 @@
import time
import zlib
-import six
-
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
@@ -865,7 +863,7 @@
expected = {'x-object-meta-test': '',
'x-object-meta-src': 'src_value',
'x-copied-from': self.container_name + "/" + src_obj_name}
- for key, value in six.iteritems(expected):
+ for key, value in expected.items():
self.assertIn(key, resp)
self.assertEqual(value, resp[key])
@@ -888,7 +886,7 @@
expected = {'x-object-meta-test': 'value',
'x-object-meta-src': 'src_value',
'x-copied-from': self.container_name + "/" + src_obj_name}
- for key, value in six.iteritems(expected):
+ for key, value in expected.items():
self.assertIn(key, resp)
self.assertEqual(value, resp[key])
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 6d27502..5d680d2 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -28,8 +28,6 @@
@classmethod
def skip_checks(cls):
- msg = "Skipped until Bug: 1547261 is resolved."
- raise cls.skipException(msg)
super(NeutronResourcesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
@@ -44,6 +42,7 @@
super(NeutronResourcesTestJSON, cls).setup_clients()
cls.subnets_client = cls.os.subnets_client
cls.ports_client = cls.os.ports_client
+ cls.routers_client = cls.os.routers_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index b47a5f0..7d8c94d 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.common import waiters
@@ -63,7 +62,7 @@
**new_quota_set)['quota_set']
cleanup_quota_set = dict(
- (k, v) for k, v in six.iteritems(default_quota_set)
+ (k, v) for k, v in default_quota_set.items()
if k in QUOTA_KEYS)
self.addCleanup(self.admin_quotas_client.update_quota_set,
self.demo_tenant_id, **cleanup_quota_set)
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 3098cab..6b2acc6 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -93,7 +93,6 @@
"vendor_name": vendor}
body = self.create_volume_type(description=description, name=name,
extra_specs=extra_specs)
- self.assertIn('id', body)
self.assertIn('name', body)
self.assertEqual(name, body['name'],
"The created volume_type name is not equal "
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4bd7637..a63cbf0 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -14,11 +14,8 @@
# under the License.
from tempest.api.volume import base
-from tempest import config
from tempest import test
-CONF = config.CONF
-
class VolumesActionsV2Test(base.BaseVolumeAdminTest):
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
new file mode 100644
index 0000000..6a3f9ee
--- /dev/null
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.volume import base
+from tempest.common import waiters
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class SnapshotManageAdminV2Test(base.BaseVolumeAdminTest):
+ """Unmanage & manage snapshots
+
+ This feature provides the ability to import/export volume snapshots
+ from one Cinder deployment to another and to import snapshots that
+ have not been managed by Cinder from a storage back end into Cinder
+ """
+
+ @test.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
+ @testtools.skipUnless(CONF.volume_feature_enabled.manage_snapshot,
+ "Manage snapshot tests are disabled")
+ def test_unmanage_manage_snapshot(self):
+ # Create a volume
+ volume = self.create_volume()
+
+ # Create a snapshot
+ snapshot = self.create_snapshot(volume_id=volume['id'])
+
+ # Unmanage the snapshot
+ # Unmanaging a snapshot works almost the same as deleting it, but it
+ # does not delete the snapshot data
+ self.admin_snapshots_client.unmanage_snapshot(snapshot['id'])
+ self.admin_snapshots_client.wait_for_resource_deletion(snapshot['id'])
+
+ # Fetch snapshot ids
+ snapshot_list = [
+ snap['id'] for snap in
+ self.snapshots_client.list_snapshots()['snapshots']
+ ]
+
+ # Verify snapshot does not exist in snapshot list
+ self.assertNotIn(snapshot['id'], snapshot_list)
+
+ # Manage the snapshot
+ snapshot_ref = '_snapshot-%s' % snapshot['id']
+ new_snapshot = self.admin_snapshot_manage_client.manage_snapshot(
+ volume_id=volume['id'],
+ ref={'source-name': snapshot_ref})['snapshot']
+ self.addCleanup(self.delete_snapshot,
+ self.admin_snapshots_client, new_snapshot['id'])
+
+ # Wait for the snapshot to be available after manage operation
+ waiters.wait_for_snapshot_status(self.admin_snapshots_client,
+ new_snapshot['id'],
+ 'available')
+
+ # Verify the managed snapshot has the expected parent volume
+ self.assertEqual(new_snapshot['volume_id'], volume['id'])
diff --git a/tempest/api/volume/admin/v2/test_volume_pools.py b/tempest/api/volume/admin/v2/test_volume_pools.py
index e460278..8544a6a 100644
--- a/tempest/api/volume/admin/v2/test_volume_pools.py
+++ b/tempest/api/volume/admin/v2/test_volume_pools.py
@@ -25,19 +25,18 @@
# Create a test shared volume for tests
cls.volume = cls.create_volume()
- @test.idempotent_id('0248a46c-e226-4933-be10-ad6fca8227e7')
- def test_get_pools_without_details(self):
- volume_info = self.admin_volume_client. \
- show_volume(self.volume['id'])['volume']
- cinder_pools = self.admin_scheduler_stats_client.list_pools()['pools']
+ def _assert_host_volume_in_pools(self, with_detail=False):
+ volume_info = self.admin_volume_client.show_volume(
+ self.volume['id'])['volume']
+ cinder_pools = self.admin_volume_client.show_pools(
+ detail=with_detail)['pools']
self.assertIn(volume_info['os-vol-host-attr:host'],
[pool['name'] for pool in cinder_pools])
+ @test.idempotent_id('0248a46c-e226-4933-be10-ad6fca8227e7')
+ def test_get_pools_without_details(self):
+ self._assert_host_volume_in_pools()
+
@test.idempotent_id('d4bb61f7-762d-4437-b8a4-5785759a0ced')
def test_get_pools_with_details(self):
- volume_info = self.admin_volume_client. \
- show_volume(self.volume['id'])['volume']
- cinder_pools = self.admin_scheduler_stats_client.\
- list_pools(detail=True)['pools']
- self.assertIn(volume_info['os-vol-host-attr:host'],
- [pool['name'] for pool in cinder_pools])
+ self._assert_host_volume_in_pools(with_detail=True)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7cd72e1..a4ed7a5 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -51,7 +51,7 @@
raise cls.skipException(msg)
else:
msg = ("Invalid Cinder API version (%s)" % cls._api_version)
- raise exceptions.InvalidConfiguration(message=msg)
+ raise exceptions.InvalidConfiguration(msg)
@classmethod
def setup_credentials(cls):
@@ -165,14 +165,19 @@
# NOTE(afazekas): these create_* and clean_* could be defined
# only in a single location in the source, and could be more general.
- @classmethod
- def delete_volume(cls, client, volume_id):
+ @staticmethod
+ def delete_volume(client, volume_id):
"""Delete volume by the given client"""
client.delete_volume(volume_id)
client.wait_for_resource_deletion(volume_id)
+ def delete_snapshot(self, client, snapshot_id):
+ """Delete snapshot by the given client"""
+ client.delete_snapshot(snapshot_id)
+ client.wait_for_resource_deletion(snapshot_id)
+
def attach_volume(self, server_id, volume_id):
- """Attachs a volume to a server"""
+ """Attach a volume to a server"""
self.servers_client.attach_volume(
server_id, volumeId=volume_id,
device='/dev/%s' % CONF.compute.volume_device_name)
@@ -181,7 +186,7 @@
self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
volume_id, 'available')
self.addCleanup(self.servers_client.detach_volume, server_id,
- self.volume_origin['id'])
+ volume_id)
@classmethod
def clear_volumes(cls):
@@ -200,16 +205,13 @@
@classmethod
def clear_snapshots(cls):
for snapshot in cls.snapshots:
- try:
- cls.snapshots_client.delete_snapshot(snapshot['id'])
- except Exception:
- pass
+ test_utils.call_and_ignore_notfound_exc(
+ cls.snapshots_client.delete_snapshot, snapshot['id'])
for snapshot in cls.snapshots:
- try:
- cls.snapshots_client.wait_for_resource_deletion(snapshot['id'])
- except Exception:
- pass
+ test_utils.call_and_ignore_notfound_exc(
+ cls.snapshots_client.wait_for_resource_deletion,
+ snapshot['id'])
def create_server(self, **kwargs):
name = kwargs.pop(
@@ -260,6 +262,8 @@
cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client
cls.admin_volume_client = cls.os_adm.volumes_v2_client
cls.admin_hosts_client = cls.os_adm.volume_hosts_v2_client
+ cls.admin_snapshot_manage_client = \
+ cls.os_adm.snapshot_manage_v2_client
cls.admin_snapshots_client = cls.os_adm.snapshots_v2_client
cls.admin_backups_client = cls.os_adm.backups_v2_client
cls.admin_encryption_types_client = \
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index cce9ace..f044124 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -35,7 +35,7 @@
if len(CONF.volume_feature_enabled.api_extensions) == 0:
raise self.skipException('There are not any extensions configured')
extension_list = [extension.get('alias') for extension in extensions]
- LOG.debug("Cinder extensions: %s" % ','.join(extension_list))
+ LOG.debug("Cinder extensions: %s", ','.join(extension_list))
ext = CONF.volume_feature_enabled.api_extensions[0]
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index bc7694a..35e0d56 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -23,6 +23,9 @@
class AbsoluteLimitsV2Tests(base.BaseVolumeTest):
+ # Avoid existing volumes of the pre-defined tenant
+ force_tenant_isolation = True
+
@classmethod
def resource_setup(cls):
super(AbsoluteLimitsV2Tests, cls).resource_setup()
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 972dd58..70b3c58 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -30,6 +30,22 @@
if not CONF.volume_feature_enabled.backup:
raise cls.skipException("Cinder backup feature disabled")
+ def restore_backup(self, backup_id):
+ # Restore a backup
+ restored_volume = self.backups_client.restore_backup(
+ backup_id)['restore']
+
+ # Clean up the restored volume
+ self.addCleanup(self.volumes_client.delete_volume,
+ restored_volume['volume_id'])
+ self.assertEqual(backup_id, restored_volume['backup_id'])
+ waiters.wait_for_backup_status(self.backups_client,
+ backup_id, 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ restored_volume['volume_id'],
+ 'available')
+ return restored_volume
+
@test.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
def test_volume_backup_create_get_detailed_list_restore_delete(self):
# Create backup
@@ -38,8 +54,10 @@
volume['id'])
backup_name = data_utils.rand_name(
self.__class__.__name__ + '-Backup')
+ description = data_utils.rand_name("volume-backup-description")
backup = self.create_backup(volume_id=volume['id'],
- name=backup_name)
+ name=backup_name,
+ description=description)
self.assertEqual(backup_name, backup['name'])
waiters.wait_for_volume_status(self.volumes_client,
volume['id'], 'available')
@@ -47,6 +65,7 @@
# Get a given backup
backup = self.backups_client.show_backup(backup['id'])['backup']
self.assertEqual(backup_name, backup['name'])
+ self.assertEqual(description, backup['description'])
# Get all backups with detail
backups = self.backups_client.list_backups(
@@ -54,18 +73,7 @@
self.assertIn((backup['name'], backup['id']),
[(m['name'], m['id']) for m in backups])
- # Restore backup
- restore = self.backups_client.restore_backup(
- backup['id'])['restore']
-
- # Delete backup
- self.addCleanup(self.volumes_client.delete_volume,
- restore['volume_id'])
- self.assertEqual(backup['id'], restore['backup_id'])
- waiters.wait_for_backup_status(self.backups_client,
- backup['id'], 'available')
- waiters.wait_for_volume_status(self.volumes_client,
- restore['volume_id'], 'available')
+ self.restore_backup(backup['id'])
@test.idempotent_id('07af8f6d-80af-44c9-a5dc-c8427b1b62e6')
@test.services('compute')
@@ -96,6 +104,28 @@
name=backup_name, force=True)
self.assertEqual(backup_name, backup['name'])
+ @test.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
+ def test_bootable_volume_backup_and_restore(self):
+ # Create volume from image
+ img_uuid = CONF.compute.image_ref
+ volume = self.create_volume(imageRef=img_uuid)
+
+ volume_details = self.volumes_client.show_volume(
+ volume['id'])['volume']
+ self.assertEqual('true', volume_details['bootable'])
+
+ # Create a backup
+ backup = self.create_backup(volume_id=volume['id'])
+
+ # Restore the backup
+ restored_volume_id = self.restore_backup(backup['id'])['volume_id']
+
+ # Verify the restored backup volume is bootable
+ restored_volume_info = self.volumes_client.show_volume(
+ restored_volume_id)['volume']
+
+ self.assertEqual('true', restored_volume_info['bootable'])
+
class VolumesBackupsV1Test(VolumesBackupsV2Test):
_api_version = 1
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 3c7a2c8..6f85891 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -140,6 +140,14 @@
# Destination volume bigger than source snapshot
dst_vol = self.create_volume(snapshot_id=src_snap['id'],
size=src_size + 1)
+ # NOTE(zhufl): dst_vol is created based on snapshot, so dst_vol
+ # should be deleted before deleting snapshot, otherwise deleting
+ # snapshot will end with status 'error-deleting'. This depends on
+ # the implementation mechanism of vendors: generally speaking,
+ # some vendors use "virtual disk clone" to speed up disk cloning,
+ # and in this situation the "disk clone" is just a relationship
+ # between volume and snapshot.
+ self.addCleanup(self.delete_volume, self.volumes_client, dst_vol['id'])
volume = self.volumes_client.show_volume(dst_vol['id'])['volume']
# Should allow
diff --git a/tempest/clients.py b/tempest/clients.py
index 4a30f6f..8093a72 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,16 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-
from oslo_log import log as logging
from tempest import config
from tempest.lib import auth
from tempest.lib import exceptions as lib_exc
from tempest.lib.services import clients
-from tempest.services import baremetal
-from tempest.services import identity
+from tempest.lib.services import identity
from tempest.services import object_storage
from tempest.services import orchestration
@@ -35,20 +32,19 @@
default_params = config.service_client_config()
- # TODO(andreaf) This is only used by baremetal clients,
- # and should be removed once they are out of Tempest
+ # TODO(jordanP): remove this once no Tempest plugin uses that class
+ # variable.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
- def __init__(self, credentials, service=None, scope='project'):
+ def __init__(self, credentials, scope='project'):
"""Initialization of Manager class.
Setup all services clients and make them available for tests cases.
:param credentials: type Credentials or TestResources
- :param service: Service name
:param scope: default scope for tokens produced by the auth provider
"""
_, identity_uri = get_auth_provider_class(credentials)
@@ -66,12 +62,6 @@
self._set_image_clients()
self._set_network_clients()
- self.baremetal_client = baremetal.BaremetalClient(
- self.auth_provider,
- CONF.baremetal.catalog_type,
- CONF.identity.region,
- endpoint_type=CONF.baremetal.endpoint_type,
- **self.default_params_with_timeout_values)
self.orchestration_client = orchestration.OrchestrationClient(
self.auth_provider,
CONF.orchestration.catalog_type,
@@ -106,7 +96,7 @@
config.service_client_config(service_for_config))
except lib_exc.UnknownServiceClient:
LOG.warning(
- 'Could not load configuration for service %s' % service)
+ 'Could not load configuration for service %s', service)
return configuration
@@ -137,6 +127,8 @@
self.image_member_client_v2 = self.image_v2.ImageMembersClient()
self.namespaces_client = self.image_v2.NamespacesClient()
self.resource_types_client = self.image_v2.ResourceTypesClient()
+ self.namespace_objects_client = \
+ self.image_v2.NamespaceObjectsClient()
self.schemas_client = self.image_v2.SchemasClient()
self.namespace_properties_client = \
self.image_v2.NamespacePropertiesClient()
@@ -177,7 +169,6 @@
self.instance_usages_audit_log_client = (
self.compute.InstanceUsagesAuditLogClient())
self.tenant_networks_client = self.compute.TenantNetworksClient()
- self.baremetal_nodes_client = self.compute.BaremetalNodesClient()
# NOTE: The following client needs special timeout values because
# the API is a proxy for the other component.
@@ -194,67 +185,52 @@
**params_volume)
def _set_identity_clients(self):
- params = self.parameters['identity']
-
# Clients below use the admin endpoint type of Keystone API v2
- params_v2_admin = copy.copy(params)
- params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
- self.endpoints_client = identity.v2.EndpointsClient(self.auth_provider,
- **params_v2_admin)
- self.identity_client = identity.v2.IdentityClient(self.auth_provider,
- **params_v2_admin)
- self.tenants_client = identity.v2.TenantsClient(self.auth_provider,
- **params_v2_admin)
- self.roles_client = identity.v2.RolesClient(self.auth_provider,
- **params_v2_admin)
- self.users_client = identity.v2.UsersClient(self.auth_provider,
- **params_v2_admin)
- self.identity_services_client = identity.v2.ServicesClient(
- self.auth_provider, **params_v2_admin)
+ params_v2_admin = {
+ 'endpoint_type': CONF.identity.v2_admin_endpoint_type}
+ self.endpoints_client = self.identity_v2.EndpointsClient(
+ **params_v2_admin)
+ self.identity_client = self.identity_v2.IdentityClient(
+ **params_v2_admin)
+ self.tenants_client = self.identity_v2.TenantsClient(
+ **params_v2_admin)
+ self.roles_client = self.identity_v2.RolesClient(**params_v2_admin)
+ self.users_client = self.identity_v2.UsersClient(**params_v2_admin)
+ self.identity_services_client = self.identity_v2.ServicesClient(
+ **params_v2_admin)
# Clients below use the public endpoint type of Keystone API v2
- params_v2_public = copy.copy(params)
- params_v2_public['endpoint_type'] = (
- CONF.identity.v2_public_endpoint_type)
- self.identity_public_client = identity.v2.IdentityClient(
- self.auth_provider, **params_v2_public)
- self.tenants_public_client = identity.v2.TenantsClient(
- self.auth_provider, **params_v2_public)
- self.users_public_client = identity.v2.UsersClient(
- self.auth_provider, **params_v2_public)
+ params_v2_public = {
+ 'endpoint_type': CONF.identity.v2_public_endpoint_type}
+ self.identity_public_client = self.identity_v2.IdentityClient(
+ **params_v2_public)
+ self.tenants_public_client = self.identity_v2.TenantsClient(
+ **params_v2_public)
+ self.users_public_client = self.identity_v2.UsersClient(
+ **params_v2_public)
# Clients below use the endpoint type of Keystone API v3, which is set
# in endpoint_type
- params_v3 = copy.copy(params)
- params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
- self.domains_client = identity.v3.DomainsClient(self.auth_provider,
- **params_v3)
- self.identity_v3_client = identity.v3.IdentityClient(
- self.auth_provider, **params_v3)
- self.trusts_client = identity.v3.TrustsClient(self.auth_provider,
- **params_v3)
- self.users_v3_client = identity.v3.UsersClient(self.auth_provider,
- **params_v3)
- self.endpoints_v3_client = identity.v3.EndPointsClient(
- self.auth_provider, **params_v3)
- self.roles_v3_client = identity.v3.RolesClient(self.auth_provider,
- **params_v3)
- self.inherited_roles_client = identity.v3.InheritedRolesClient(
- self.auth_provider, **params_v3)
- self.role_assignments_client = identity.v3.RoleAssignmentsClient(
- self.auth_provider, **params_v3)
- self.identity_services_v3_client = identity.v3.ServicesClient(
- self.auth_provider, **params_v3)
- self.policies_client = identity.v3.PoliciesClient(self.auth_provider,
- **params_v3)
- self.projects_client = identity.v3.ProjectsClient(self.auth_provider,
- **params_v3)
- self.regions_client = identity.v3.RegionsClient(self.auth_provider,
- **params_v3)
- self.credentials_client = identity.v3.CredentialsClient(
- self.auth_provider, **params_v3)
- self.groups_client = identity.v3.GroupsClient(self.auth_provider,
- **params_v3)
+ params_v3 = {'endpoint_type': CONF.identity.v3_endpoint_type}
+ self.domains_client = self.identity_v3.DomainsClient(**params_v3)
+ self.identity_v3_client = self.identity_v3.IdentityClient(**params_v3)
+ self.trusts_client = self.identity_v3.TrustsClient(**params_v3)
+ self.users_v3_client = self.identity_v3.UsersClient(**params_v3)
+ self.endpoints_v3_client = self.identity_v3.EndPointsClient(
+ **params_v3)
+ self.roles_v3_client = self.identity_v3.RolesClient(**params_v3)
+ self.inherited_roles_client = self.identity_v3.InheritedRolesClient(
+ **params_v3)
+ self.role_assignments_client = self.identity_v3.RoleAssignmentsClient(
+ **params_v3)
+ self.identity_services_v3_client = self.identity_v3.ServicesClient(
+ **params_v3)
+ self.policies_client = self.identity_v3.PoliciesClient(**params_v3)
+ self.projects_client = self.identity_v3.ProjectsClient(**params_v3)
+ self.regions_client = self.identity_v3.RegionsClient(**params_v3)
+ self.credentials_client = self.identity_v3.CredentialsClient(
+ **params_v3)
+ self.groups_client = self.identity_v3.GroupsClient(**params_v3)
# Token clients do not use the catalog. They only need default_params.
# They read auth_url, so they should only be set if the corresponding
@@ -285,6 +261,7 @@
self.encryption_types_client = self.volume_v1.EncryptionTypesClient()
self.encryption_types_v2_client = \
self.volume_v2.EncryptionTypesClient()
+ self.snapshot_manage_v2_client = self.volume_v2.SnapshotManageClient()
self.snapshots_client = self.volume_v1.SnapshotsClient()
self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
self.volumes_client = self.volume_v1.VolumesClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 3d38e25..172d9e1 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -205,7 +205,7 @@
os.rename(account_file, '.'.join((account_file, 'bak')))
with open(account_file, 'w') as f:
yaml.safe_dump(accounts, f, default_flow_style=False)
- LOG.info('%s generated successfully!' % account_file)
+ LOG.info('%s generated successfully!', account_file)
def _parser_add_args(parser):
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index af86fe3..ec76103 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -149,8 +149,8 @@
def _remove_admin_user_roles(self):
tenant_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for tenants: %s"
- % tenant_ids)
+ LOG.debug("Removing admin user roles where needed for tenants: %s",
+ tenant_ids)
for tenant_id in tenant_ids:
self._remove_admin_role(tenant_id)
@@ -236,13 +236,13 @@
needs_role = False
LOG.debug("User already had admin privilege for this tenant")
if needs_role:
- LOG.debug("Adding admin privilege for : %s" % tenant_id)
+ LOG.debug("Adding admin privilege for : %s", tenant_id)
rl_cl.create_user_role_on_project(tenant_id, self.admin_id,
self.admin_role_id)
self.admin_role_added.append(tenant_id)
def _remove_admin_role(self, tenant_id):
- LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
+ LOG.debug("Remove admin user role for tenant: %s", tenant_id)
# Must initialize AdminManager for each user role
# Otherwise authentication exception is thrown, weird
id_cl = credentials.AdminManager().identity_client
@@ -253,16 +253,16 @@
self.admin_role_id)
except Exception as ex:
LOG.exception("Failed removing role from tenant which still"
- "exists, exception: %s" % ex)
+ "exists, exception: %s", ex)
def _tenant_exists(self, tenant_id):
tn_cl = self.admin_mgr.tenants_client
try:
t = tn_cl.show_tenant(tenant_id)
- LOG.debug("Tenant is: %s" % str(t))
+ LOG.debug("Tenant is: %s", str(t))
return True
except Exception as ex:
- LOG.debug("Tenant no longer exists? %s" % ex)
+ LOG.debug("Tenant no longer exists? %s", ex)
return False
def _init_state(self):
@@ -290,8 +290,8 @@
except IOError as ex:
LOG.exception("Failed loading saved state, please be sure you"
" have first run cleanup with --init-saved-state "
- "flag prior to running tempest. Exception: %s" % ex)
+ "flag prior to running tempest. Exception: %s", ex)
sys.exit(ex)
except Exception as ex:
- LOG.exception("Exception parsing saved state json : %s" % ex)
+ LOG.exception("Exception parsing saved state json : %s", ex)
sys.exit(ex)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 32b0ebb..a632726 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -144,7 +144,7 @@
def list(self):
client = self.client
snaps = client.list_snapshots()['snapshots']
- LOG.debug("List count, %s Snapshots" % len(snaps))
+ LOG.debug("List count, %s Snapshots", len(snaps))
return snaps
def delete(self):
@@ -171,7 +171,7 @@
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
- LOG.debug("List count, %s Servers" % len(servers))
+ LOG.debug("List count, %s Servers", len(servers))
return servers
def delete(self):
@@ -193,7 +193,7 @@
def list(self):
client = self.server_groups_client
sgs = client.list_server_groups()['server_groups']
- LOG.debug("List count, %s Server Groups" % len(sgs))
+ LOG.debug("List count, %s Server Groups", len(sgs))
return sgs
def delete(self):
@@ -218,7 +218,7 @@
def list(self):
client = self.client
stacks = client.list_stacks()['stacks']
- LOG.debug("List count, %s Stacks" % len(stacks))
+ LOG.debug("List count, %s Stacks", len(stacks))
return stacks
def delete(self):
@@ -243,7 +243,7 @@
def list(self):
client = self.client
keypairs = client.list_keypairs()['keypairs']
- LOG.debug("List count, %s Keypairs" % len(keypairs))
+ LOG.debug("List count, %s Keypairs", len(keypairs))
return keypairs
def delete(self):
@@ -270,7 +270,7 @@
client = self.client
secgrps = client.list_security_groups()['security_groups']
secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
- LOG.debug("List count, %s Security Groups" % len(secgrp_del))
+ LOG.debug("List count, %s Security Groups", len(secgrp_del))
return secgrp_del
def delete(self):
@@ -295,7 +295,7 @@
def list(self):
client = self.client
floating_ips = client.list_floating_ips()['floating_ips']
- LOG.debug("List count, %s Floating IPs" % len(floating_ips))
+ LOG.debug("List count, %s Floating IPs", len(floating_ips))
return floating_ips
def delete(self):
@@ -320,7 +320,7 @@
def list(self):
client = self.client
vols = client.list_volumes()['volumes']
- LOG.debug("List count, %s Volumes" % len(vols))
+ LOG.debug("List count, %s Volumes", len(vols))
return vols
def delete(self):
@@ -402,7 +402,7 @@
if self.is_preserve:
networks = [network for network in networks
if network['id'] not in CONF_NETWORKS]
- LOG.debug("List count, %s Networks" % networks)
+ LOG.debug("List count, %s Networks", networks)
return networks
def delete(self):
@@ -425,7 +425,7 @@
client = self.floating_ips_client
flips = client.list_floatingips(**self.tenant_filter)
flips = flips['floatingips']
- LOG.debug("List count, %s Network Floating IPs" % len(flips))
+ LOG.debug("List count, %s Network Floating IPs", len(flips))
return flips
def delete(self):
@@ -452,7 +452,7 @@
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
- LOG.debug("List count, %s Routers" % len(routers))
+ LOG.debug("List count, %s Routers", len(routers))
return routers
def delete(self):
@@ -483,7 +483,7 @@
hms = client.list_health_monitors()
hms = hms['health_monitors']
hms = self._filter_by_tenant_id(hms)
- LOG.debug("List count, %s Health Monitors" % len(hms))
+ LOG.debug("List count, %s Health Monitors", len(hms))
return hms
def delete(self):
@@ -507,7 +507,7 @@
members = client.list_members()
members = members['members']
members = self._filter_by_tenant_id(members)
- LOG.debug("List count, %s Members" % len(members))
+ LOG.debug("List count, %s Members", len(members))
return members
def delete(self):
@@ -531,7 +531,7 @@
vips = client.list_vips()
vips = vips['vips']
vips = self._filter_by_tenant_id(vips)
- LOG.debug("List count, %s VIPs" % len(vips))
+ LOG.debug("List count, %s VIPs", len(vips))
return vips
def delete(self):
@@ -555,7 +555,7 @@
pools = client.list_pools()
pools = pools['pools']
pools = self._filter_by_tenant_id(pools)
- LOG.debug("List count, %s Pools" % len(pools))
+ LOG.debug("List count, %s Pools", len(pools))
return pools
def delete(self):
@@ -579,7 +579,7 @@
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
- LOG.debug("List count, %s Metering Label Rules" % len(rules))
+ LOG.debug("List count, %s Metering Label Rules", len(rules))
return rules
def delete(self):
@@ -603,7 +603,7 @@
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
- LOG.debug("List count, %s Metering Labels" % len(labels))
+ LOG.debug("List count, %s Metering Labels", len(labels))
return labels
def delete(self):
@@ -632,7 +632,7 @@
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
- LOG.debug("List count, %s Ports" % len(ports))
+ LOG.debug("List count, %s Ports", len(ports))
return ports
def delete(self):
@@ -660,7 +660,7 @@
if self.is_preserve:
secgroups = self._filter_by_conf_networks(secgroups)
- LOG.debug("List count, %s security_groups" % len(secgroups))
+ LOG.debug("List count, %s security_groups", len(secgroups))
return secgroups
def delete(self):
@@ -685,7 +685,7 @@
subnets = subnets['subnets']
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
- LOG.debug("List count, %s Subnets" % len(subnets))
+ LOG.debug("List count, %s Subnets", len(subnets))
return subnets
def delete(self):
@@ -719,7 +719,7 @@
if self.is_preserve:
flavors = [flavor for flavor in flavors
if flavor['id'] not in CONF_FLAVORS]
- LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
+ LOG.debug("List count, %s Flavors after reconcile", len(flavors))
return flavors
def delete(self):
@@ -756,7 +756,7 @@
if self.is_preserve:
images = [image for image in images
if image['id'] not in CONF_IMAGES]
- LOG.debug("List count, %s Images after reconcile" % len(images))
+ LOG.debug("List count, %s Images after reconcile", len(images))
return images
def delete(self):
@@ -806,7 +806,7 @@
users = [user for user in users if user['name'] !=
CONF.auth.admin_username]
- LOG.debug("List count, %s Users after reconcile" % len(users))
+ LOG.debug("List count, %s Users after reconcile", len(users))
return users
def delete(self):
@@ -843,7 +843,7 @@
(role['id'] not in
self.saved_state_json['roles'].keys()
and role['name'] != CONF.identity.admin_role)]
- LOG.debug("List count, %s Roles after reconcile" % len(roles))
+ LOG.debug("List count, %s Roles after reconcile", len(roles))
return roles
except Exception:
LOG.exception("Cannot retrieve Roles.")
@@ -885,7 +885,7 @@
tenants = [tenant for tenant in tenants if tenant['name']
not in CONF_TENANTS]
- LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
+ LOG.debug("List count, %s Tenants after reconcile", len(tenants))
return tenants
def delete(self):
@@ -920,7 +920,7 @@
domains = [domain for domain in domains if domain['id']
not in self.saved_state_json['domains'].keys()]
- LOG.debug("List count, %s Domains after reconcile" % len(domains))
+ LOG.debug("List count, %s Domains after reconcile", len(domains))
return domains
def delete(self):
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index 99185d2..7634d9e 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -120,7 +120,7 @@
if os.path.isdir(config_dir):
shutil.copytree(config_dir, etc_dir)
else:
- LOG.warning("Global config dir %s can't be found" % config_dir)
+ LOG.warning("Global config dir %s can't be found", config_dir)
def generate_sample_config(self, local_dir):
conf_generator = os.path.join(os.path.dirname(__file__),
@@ -131,14 +131,14 @@
output_file])
else:
LOG.warning("Skipping sample config generation because global "
- "config file %s can't be found" % conf_generator)
+ "config file %s can't be found", conf_generator)
def create_working_dir(self, local_dir, config_dir):
# make sure we are working with abspath however tempest init is called
local_dir = os.path.abspath(local_dir)
# Create local dir if missing
if not os.path.isdir(local_dir):
- LOG.debug('Creating local working dir: %s' % local_dir)
+ LOG.debug('Creating local working dir: %s', local_dir)
os.mkdir(local_dir)
elif not os.listdir(local_dir) == []:
raise OSError("Directory you are trying to initialize already "
@@ -151,11 +151,11 @@
testr_dir = os.path.join(local_dir, '.testrepository')
# Create lock dir
if not os.path.isdir(lock_dir):
- LOG.debug('Creating lock dir: %s' % lock_dir)
+ LOG.debug('Creating lock dir: %s', lock_dir)
os.mkdir(lock_dir)
# Create log dir
if not os.path.isdir(log_dir):
- LOG.debug('Creating log dir: %s' % log_dir)
+ LOG.debug('Creating log dir: %s', log_dir)
os.mkdir(log_dir)
# Create and copy local etc dir
self.copy_config(etc_dir, config_dir)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 5fa8b74..a3105e0 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -88,6 +88,7 @@
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
+import six
from testrepository.commands import run_argv
from tempest.cmd import init
@@ -109,6 +110,12 @@
return
else:
os.environ["TESTR_PDB"] = ""
+ # NOTE(dims): most of our .testr.conf files test for the PYTHON
+ # environment variable and fall back to "python" if it does not
+ # exist. Under python3 we should set it to the python3 executable
+ # to deal with this situation better for now.
+ if six.PY3 and 'PYTHON' not in os.environ:
+ os.environ['PYTHON'] = sys.executable
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 64543fb..4f2fe67 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -163,8 +163,8 @@
clients.servers_client.delete_server(
server['id'])
except Exception:
- LOG.exception('Deleting server %s failed'
- % server['id'])
+ LOG.exception('Deleting server %s failed',
+ server['id'])
return body, servers
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index 5634958..e6b46ed 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -88,7 +88,7 @@
project_network_mask_bits=CONF.network.project_network_mask_bits,
public_network_id=CONF.network.public_network_id,
create_networks=(CONF.auth.create_isolated_networks and not
- CONF.baremetal.driver_enabled),
+ CONF.network.shared_physical_network),
resource_prefix=CONF.resources_prefix,
**get_dynamic_provider_params())
else:
@@ -233,7 +233,6 @@
class AdminManager(clients.Manager):
"""Manager that uses admin credentials for its managed client objects"""
- def __init__(self, service=None):
+ def __init__(self):
super(AdminManager, self).__init__(
- credentials=get_configured_admin_credentials(),
- service=service)
+ credentials=get_configured_admin_credentials())
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index 8410541..ed11b21 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -14,7 +14,6 @@
import re
-import six
from testtools import helpers
@@ -217,7 +216,7 @@
"""
def match(self, actual):
- for key, value in six.iteritems(actual):
+ for key, value in actual.items():
if key in ('content-length', 'x-account-bytes-used',
'x-account-container-count', 'x-account-object-count',
'x-container-bytes-used', 'x-container-object-count')\
diff --git a/tempest/common/dynamic_creds.py b/tempest/common/dynamic_creds.py
index 2763d16..632a876 100644
--- a/tempest/common/dynamic_creds.py
+++ b/tempest/common/dynamic_creds.py
@@ -254,7 +254,7 @@
msg = "There was an exception trying to setup network " \
"resources for tenant %s, and this error happened " \
"trying to clean them up: %s"
- LOG.warning(msg % (tenant_id, cleanup_exception))
+ LOG.warning(msg, tenant_id, cleanup_exception)
raise
return network, subnet, router
@@ -316,8 +316,7 @@
credentials = self._create_creds(roles=credential_type)
self._creds[str(credential_type)] = credentials
# Maintained until tests are ported
- LOG.info("Acquired dynamic creds:\n credentials: %s"
- % credentials)
+ LOG.info("Acquired dynamic creds:\n credentials: %s", credentials)
if (self.neutron_available and
self.create_networks):
network, subnet, router = self._create_network_resources(
@@ -325,7 +324,7 @@
credentials.set_resources(network=network, subnet=subnet,
router=router)
LOG.info("Created isolated network resources for : \n"
- + " credentials: %s" % credentials)
+ + " credentials: %s", credentials)
return credentials
def get_primary_creds(self):
@@ -356,7 +355,7 @@
try:
client.delete_router(router_id)
except lib_exc.NotFound:
- LOG.warning('router with name: %s not found for delete' %
+ LOG.warning('router with name: %s not found for delete',
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
@@ -364,7 +363,7 @@
try:
client.delete_subnet(subnet_id)
except lib_exc.NotFound:
- LOG.warning('subnet with name: %s not found for delete' %
+ LOG.warning('subnet with name: %s not found for delete',
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
@@ -372,7 +371,7 @@
try:
net_client.delete_network(network_id)
except lib_exc.NotFound:
- LOG.warning('network with name: %s not found for delete' %
+ LOG.warning('network with name: %s not found for delete',
network_name)
def _cleanup_default_secgroup(self, tenant):
@@ -384,8 +383,8 @@
try:
nsg_client.delete_security_group(secgroup['id'])
except lib_exc.NotFound:
- LOG.warning('Security group %s, id %s not found for clean-up' %
- (secgroup['name'], secgroup['id']))
+ LOG.warning('Security group %s, id %s not found for clean-up',
+ secgroup['name'], secgroup['id'])
def _clear_isolated_net_resources(self):
client = self.routers_admin_client
@@ -405,7 +404,7 @@
creds.router['id'],
subnet_id=creds.subnet['id'])
except lib_exc.NotFound:
- LOG.warning('router with name: %s not found for delete' %
+ LOG.warning('router with name: %s not found for delete',
creds.router['name'])
self._clear_isolated_router(creds.router['id'],
creds.router['name'])
@@ -426,7 +425,7 @@
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
- LOG.warning("user with name: %s not found for delete" %
+ LOG.warning("user with name: %s not found for delete",
creds.username)
# NOTE(zhufl): Only when neutron's security_group ext is
# enabled, _cleanup_default_secgroup will not raise error. But
@@ -437,12 +436,12 @@
if self.neutron_available:
self._cleanup_default_secgroup(creds.tenant_id)
except lib_exc.NotFound:
- LOG.warning("failed to cleanup tenant %s's secgroup" %
+ LOG.warning("failed to cleanup tenant %s's secgroup",
creds.tenant_name)
try:
self.creds_client.delete_project(creds.tenant_id)
except lib_exc.NotFound:
- LOG.warning("tenant with name: %s not found for delete" %
+ LOG.warning("tenant with name: %s not found for delete",
creds.tenant_name)
self._creds = {}
diff --git a/tempest/common/fixed_network.py b/tempest/common/fixed_network.py
index f57c18a..f50edbd 100644
--- a/tempest/common/fixed_network.py
+++ b/tempest/common/fixed_network.py
@@ -122,5 +122,5 @@
params.update({"networks": [{'uuid': network['id']}]})
else:
LOG.warning('The provided network dict: %s was invalid and did '
- 'not contain an id' % network)
+ 'not contain an id', network)
return params
diff --git a/tempest/common/image.py b/tempest/common/image.py
index 95a7d1a..3618f7e 100644
--- a/tempest/common/image.py
+++ b/tempest/common/image.py
@@ -15,8 +15,6 @@
import copy
-import six
-
def get_image_meta_from_headers(resp):
meta = {'properties': {}}
@@ -55,13 +53,13 @@
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
- for key, value in six.iteritems(fields_copy.pop('properties', {})):
+ for key, value in fields_copy.pop('properties', {}).items():
headers['x-image-meta-property-%s' % key] = str(value)
- for key, value in six.iteritems(fields_copy.pop('api', {})):
+ for key, value in fields_copy.pop('api', {}).items():
headers['x-glance-api-property-%s' % key] = str(value)
- for key, value in six.iteritems(fields_copy):
+ for key, value in fields_copy.items():
headers['x-image-meta-%s' % key] = str(value)
return headers
diff --git a/tempest/common/preprov_creds.py b/tempest/common/preprov_creds.py
index 3f68ae8..6a95588 100644
--- a/tempest/common/preprov_creds.py
+++ b/tempest/common/preprov_creds.py
@@ -120,7 +120,7 @@
if 'resources' in account:
resources = account.pop('resources')
temp_hash = hashlib.md5()
- account_for_hash = dict((k, v) for (k, v) in six.iteritems(account)
+ account_for_hash = dict((k, v) for (k, v) in account.items()
if k in cls.HASH_CRED_FIELDS)
temp_hash.update(six.text_type(account_for_hash).encode('utf-8'))
temp_hash_key = temp_hash.hexdigest()
@@ -158,8 +158,10 @@
if resource == 'network':
hash_dict['networks'][temp_hash_key] = resources[resource]
else:
- LOG.warning('Unknown resource type %s, ignoring this field'
- % resource)
+ LOG.warning(
+ 'Unknown resource type %s, ignoring this field',
+ resource
+ )
return hash_dict
def is_multi_user(self):
@@ -245,7 +247,7 @@
free_hash = self._get_free_hash(useable_hashes)
clean_creds = self._sanitize_creds(
self.hash_dict['creds'][free_hash])
- LOG.info('%s allocated creds:\n%s' % (self.name, clean_creds))
+ LOG.info('%s allocated creds:\n%s', self.name, clean_creds)
return self._wrap_creds_with_network(free_hash)
@lockutils.synchronized('test_accounts_io', external=True)
@@ -253,7 +255,7 @@
hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
LOG.warning('Expected an account lock file %s to remove, but '
- 'one did not exist' % hash_path)
+ 'one did not exist', hash_path)
else:
os.remove(hash_path)
if not os.listdir(self.accounts_dir):
@@ -278,7 +280,7 @@
_hash = self.get_hash(creds)
clean_creds = self._sanitize_creds(self.hash_dict['creds'][_hash])
self.remove_hash(_hash)
- LOG.info("%s returned allocated creds:\n%s" % (self.name, clean_creds))
+ LOG.info("%s returned allocated creds:\n%s", self.name, clean_creds)
def get_primary_creds(self):
if self._creds.get('primary'):
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index d8993bb..009812e 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -87,7 +87,7 @@
# Shell options below add more clearness on failures,
# path is extended for some non-cirros guest oses (centos7)
cmd = CONF.validation.ssh_shell_prologue + " " + cmd
- LOG.debug("Remote command: %s" % cmd)
+ LOG.debug("Remote command: %s", cmd)
return self.ssh_client.exec_command(cmd)
@debug_ssh
@@ -248,5 +248,5 @@
except tempest.lib.exceptions.SSHExecCommandFailed:
LOG.error("Couldn't mke2fs")
cmd_why = 'sudo ls -lR /dev'
- LOG.info("Contents of /dev: %s" % self.exec_command(cmd_why))
+ LOG.info("Contents of /dev: %s", self.exec_command(cmd_why))
raise
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index a55ee32..88697c4 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -60,8 +60,7 @@
parent_group_id=security_group['id'], ip_protocol='icmp',
from_port=-1, to_port=-1)
LOG.debug("SSH Validation resource security group with tcp and icmp "
- "rules %s created"
- % sg_name)
+ "rules %s created", sg_name)
return security_group
@@ -73,7 +72,7 @@
keypair_name = data_utils.rand_name('keypair')
validation_data.update(os.keypairs_client.create_keypair(
name=keypair_name))
- LOG.debug("Validation resource key %s created" % keypair_name)
+ LOG.debug("Validation resource key %s created", keypair_name)
add_rule = False
if validation_resources['security_group']:
if validation_resources['security_group_rules']:
@@ -98,11 +97,13 @@
try:
keypair_client.delete_keypair(keypair_name)
except lib_exc.NotFound:
- LOG.warning("Keypair %s is not found when attempting to delete"
- % keypair_name)
+ LOG.warning(
+ "Keypair %s is not found when attempting to delete",
+ keypair_name
+ )
except Exception as exc:
- LOG.exception('Exception raised while deleting key %s'
- % keypair_name)
+ LOG.exception('Exception raised while deleting key %s',
+ keypair_name)
if not has_exception:
has_exception = exc
if 'security_group' in validation_data:
@@ -113,15 +114,15 @@
security_group_client.wait_for_resource_deletion(sec_id)
except lib_exc.NotFound:
LOG.warning("Security group %s is not found when attempting "
- "to delete" % sec_id)
+ "to delete", sec_id)
except lib_exc.Conflict as exc:
LOG.exception('Conflict while deleting security '
- 'group %s VM might not be deleted ' % sec_id)
+ 'group %s VM might not be deleted', sec_id)
if not has_exception:
has_exception = exc
except Exception as exc:
LOG.exception('Exception raised while deleting security '
- 'group %s ' % sec_id)
+ 'group %s', sec_id)
if not has_exception:
has_exception = exc
if 'floating_ip' in validation_data:
@@ -131,10 +132,9 @@
floating_client.delete_floating_ip(fip_id)
except lib_exc.NotFound:
LOG.warning('Floating ip %s not found while attempting to '
- 'delete' % fip_id)
+ 'delete', fip_id)
except Exception as exc:
- LOG.exception('Exception raised while deleting ip %s '
- % fip_id)
+ LOG.exception('Exception raised while deleting ip %s', fip_id)
if not has_exception:
has_exception = exc
if has_exception:
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 981a922..fe648a0 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -231,35 +231,6 @@
raise lib_exc.TimeoutException(message)
-def wait_for_bm_node_status(client, node_id, attr, status):
- """Waits for a baremetal node attribute to reach given status.
-
- The client should have a show_node(node_uuid) method to get the node.
- """
- _, node = client.show_node(node_id)
- start = int(time.time())
-
- while node[attr] != status:
- time.sleep(client.build_interval)
- _, node = client.show_node(node_id)
- status_curr = node[attr]
- if status_curr == status:
- return
-
- if int(time.time()) - start >= client.build_timeout:
- message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
- 'within the required time (%(timeout)s s).' %
- {'node_id': node_id,
- 'attr': attr,
- 'status': status,
- 'timeout': client.build_timeout})
- message += ' Current state of %s: %s.' % (attr, status_curr)
- caller = test_utils.find_test_caller()
- if caller:
- message = '(%s) %s' % (caller, message)
- raise lib_exc.TimeoutException(message)
-
-
def wait_for_qos_operations(client, qos_id, operation, args=None):
"""Waits for a qos operations to be completed.
diff --git a/tempest/config.py b/tempest/config.py
index a3d4d78..4162718 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -171,7 +171,20 @@
cfg.BoolOpt('admin_domain_scope',
default=False,
help="Whether keystone identity v3 policy required "
- "a domain scoped token to use admin APIs")
+ "a domain scoped token to use admin APIs"),
+ # Security Compliance (PCI-DSS)
+ cfg.IntOpt('user_lockout_failure_attempts',
+ default=2,
+ help="The number of unsuccessful login attempts the user is "
+ "allowed before having the account locked."),
+ cfg.IntOpt('user_lockout_duration',
+ default=5,
+ help="The number of seconds a user account will remain "
+ "locked."),
+ cfg.IntOpt('user_unique_last_password_count',
+ default=2,
+ help="The number of passwords for a user that must be unique "
+ "before an old password can be reused."),
]
service_clients_group = cfg.OptGroup(name='service-clients',
@@ -207,8 +220,15 @@
# TODO(rodrigods): Remove the reseller flag when Kilo and Liberty is end
# of life.
cfg.BoolOpt('reseller',
+ default=True,
+ help='Does the environment support reseller?',
+ deprecated_for_removal=True,
+ deprecated_reason="All supported version of OpenStack now "
+ "supports the 'reseller' feature"),
+ cfg.BoolOpt('security_compliance',
default=False,
- help='Does the environment support reseller?')
+ help='Does the environment have the security compliance '
+ 'settings enabled?')
]
compute_group = cfg.OptGroup(name='compute',
@@ -571,6 +591,10 @@
default=["1.0.0.0/16", "2.0.0.0/16"],
help="List of ip pools"
" for subnetpools creation"),
+ cfg.BoolOpt('shared_physical_network',
+ default=False,
+ help="The environment does not support network separation "
+ "between tenants."),
# TODO(ylobankov): Delete this option once the Liberty release is EOL.
cfg.BoolOpt('dvr_extra_resources',
default=True,
@@ -780,6 +804,9 @@
cfg.BoolOpt('clone',
default=True,
help='Runs Cinder volume clone test'),
+ cfg.BoolOpt('manage_snapshot',
+ default=False,
+ help='Runs Cinder manage snapshot tests'),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled volume extensions with a special '
@@ -796,8 +823,11 @@
help="Is the v3 volume API enabled"),
# TODO(ynesenenko): Remove volume_services once liberty-eol happens.
cfg.BoolOpt('volume_services',
- default=False,
- help='Extract correct host info from host@backend')
+ default=True,
+ help='Extract correct host info from host@backend',
+ deprecated_for_removal=True,
+ deprecated_reason='This config switch was added for Liberty '
+ 'which is not supported anymore.')
]
@@ -975,9 +1005,6 @@
cfg.BoolOpt('sahara',
default=False,
help="Whether or not Sahara is expected to be available"),
- cfg.BoolOpt('ironic',
- default=False,
- help="Whether or not Ironic is expected to be available"),
]
debug_group = cfg.OptGroup(name="debug",
@@ -1032,56 +1059,6 @@
deprecated_for_removal=True),
]
-
-baremetal_group = cfg.OptGroup(name='baremetal',
- title='Baremetal provisioning service options',
- help='When enabling baremetal tests, Nova '
- 'must be configured to use the Ironic '
- 'driver. The following parameters for the '
- '[compute] section must be disabled: '
- 'console_output, interface_attach, '
- 'live_migration, pause, rescue, resize '
- 'shelve, snapshot, and suspend')
-
-
-# NOTE(deva): Ironic tests have been ported to tempest.lib. New config options
-# should be added to ironic/ironic_tempest_plugin/config.py.
-# However, these options need to remain here for testing stable
-# branches until Liberty release reaches EOL.
-BaremetalGroup = [
- cfg.StrOpt('catalog_type',
- default='baremetal',
- help="Catalog type of the baremetal provisioning service"),
- cfg.BoolOpt('driver_enabled',
- default=False,
- help="Whether the Ironic nova-compute driver is enabled"),
- cfg.StrOpt('driver',
- default='fake',
- help="Driver name which Ironic uses"),
- cfg.StrOpt('endpoint_type',
- default='publicURL',
- choices=['public', 'admin', 'internal',
- 'publicURL', 'adminURL', 'internalURL'],
- help="The endpoint type to use for the baremetal provisioning "
- "service"),
- cfg.IntOpt('active_timeout',
- default=300,
- help="Timeout for Ironic node to completely provision"),
- cfg.IntOpt('association_timeout',
- default=30,
- help="Timeout for association of Nova instance and Ironic "
- "node"),
- cfg.IntOpt('power_timeout',
- default=60,
- help="Timeout for Ironic power transitions."),
- cfg.IntOpt('unprovision_timeout',
- default=300,
- help="Timeout for unprovisioning an Ironic node. "
- "Takes longer since Kilo as Ironic performs an extra "
- "step in Node cleaning.")
-]
-
-
DefaultGroup = [
cfg.StrOpt('resources_prefix',
default='tempest',
@@ -1111,7 +1088,6 @@
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
- (baremetal_group, BaremetalGroup),
(input_scenario_group, InputScenarioGroup),
(None, DefaultGroup)
]
@@ -1174,7 +1150,6 @@
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
self.debug = _CONF.debug
- self.baremetal = _CONF.baremetal
self.input_scenario = _CONF['input-scenario']
logging.tempest_set_log_file('tempest.log')
@@ -1217,7 +1192,7 @@
logging.setup(_CONF, 'tempest')
LOG = logging.getLogger('tempest')
- LOG.info("Using tempest config file %s" % path)
+ LOG.info("Using tempest config file %s", path)
register_opts()
self._set_attrs()
if parse_conf:
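The security-compliance options added above land in the existing [identity] and [identity-feature-enabled] groups, so tests read them through CONF like any other option. A minimal sketch of test-side usage, assuming a configured Tempest environment (the helper names here are made up for illustration):

    from tempest import config

    CONF = config.CONF

    def security_compliance_available():
        # Feature flag added to identity-feature-enabled in this patch
        return CONF.identity_feature_enabled.security_compliance

    def pci_dss_settings():
        # PCI-DSS options added to the identity group in this patch
        return (CONF.identity.user_lockout_failure_attempts,
                CONF.identity.user_lockout_duration,
                CONF.identity.user_unique_last_password_count)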
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index 72a15b5..5d7fbe3 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -54,7 +54,7 @@
cmd = ' '.join([prefix, os.path.join(cli_dir, cmd),
flags, action, params])
cmd = cmd.strip()
- LOG.info("running: '%s'" % cmd)
+ LOG.info("running: '%s'", cmd)
if six.PY2:
cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
diff --git a/tempest/lib/cli/output_parser.py b/tempest/lib/cli/output_parser.py
index 0313505..c71c7a7 100644
--- a/tempest/lib/cli/output_parser.py
+++ b/tempest/lib/cli/output_parser.py
@@ -112,7 +112,7 @@
if label is None:
label = line
else:
- LOG.warning('Invalid line between tables: %s' % line)
+ LOG.warning('Invalid line between tables: %s', line)
if len(table_) > 0:
LOG.warning('Missing end of table')
@@ -140,7 +140,7 @@
columns = _table_columns(line)
continue
if '|' not in line:
- LOG.warning('skipping invalid table line: %s' % line)
+ LOG.warning('skipping invalid table line: %s', line)
continue
row = []
for col in columns:
diff --git a/tempest/lib/common/cred_client.py b/tempest/lib/common/cred_client.py
index 3f10dee..ea06011 100644
--- a/tempest/lib/common/cred_client.py
+++ b/tempest/lib/common/cred_client.py
@@ -78,8 +78,8 @@
user['id'],
role['id'])
except lib_exc.Conflict:
- LOG.debug("Role %s already assigned on project %s for user %s" % (
- role['id'], project['id'], user['id']))
+ LOG.debug("Role %s already assigned on project %s for user %s",
+ role['id'], project['id'], user['id'])
@abc.abstractmethod
def get_credentials(self, user, project, password):
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 2d2771f..31d2ba5 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -406,8 +406,8 @@
def _log_request_start(self, method, req_url):
caller_name = test_utils.find_test_caller()
if self.trace_requests and re.search(self.trace_requests, caller_name):
- self.LOG.debug('Starting Request (%s): %s %s' %
- (caller_name, method, req_url))
+ self.LOG.debug('Starting Request (%s): %s %s', caller_name,
+ method, req_url)
def _log_request_full(self, resp, req_headers=None, req_body=None,
resp_body=None, extra=None):
@@ -423,11 +423,11 @@
Body: %s"""
self.LOG.debug(
- log_fmt % (
- str(req_headers),
- self._safe_body(req_body),
- str(resp_log),
- self._safe_body(resp_body)),
+ log_fmt,
+ str(req_headers),
+ self._safe_body(req_body),
+ str(resp_log),
+ self._safe_body(resp_body),
extra=extra)
def _log_request(self, method, req_url, resp,
@@ -445,12 +445,12 @@
if secs:
secs = " %.3fs" % secs
self.LOG.info(
- 'Request (%s): %s %s %s%s' % (
- caller_name,
- resp['status'],
- method,
- req_url,
- secs),
+ 'Request (%s): %s %s %s%s',
+ caller_name,
+ resp['status'],
+ method,
+ req_url,
+ secs,
extra=extra)
# Also look everything at DEBUG if you want to filter this
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index 3b28701..bd0db7c 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -74,7 +74,7 @@
# prevents frame leaks
del frame
if caller_name is None:
- LOG.debug("Sane call name not found in %s" % names)
+ LOG.debug("Sane call name not found in %s", names)
return caller_name
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 0e8e3c6..262a894 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -41,6 +41,7 @@
return {
'compute': compute,
'identity.v2': identity.v2,
+ 'identity.v3': identity.v3,
'image.v1': image.v1,
'image.v2': image.v2,
'network': network,
@@ -55,7 +56,7 @@
# NOTE(andreaf) This list will exists only as long the remain clients
# are migrated to tempest.lib, and it will then be deleted without
# deprecation or advance notice
- return set(['identity.v3', 'object-storage'])
+ return set(['object-storage'])
def available_modules():
@@ -373,7 +374,7 @@
except Exception:
LOG.exception(
'Failed to register service client from plugin %s '
- 'with parameters %s' % (plugin, service_client))
+ 'with parameters %s', plugin, service_client)
raise
def register_service_client_module(self, name, service_version,
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 24557d8..597e815 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -100,7 +100,7 @@
any changes.
:param disk_config: The name is changed to OS-DCF:diskConfig
"""
- if kwargs.get('disk_config'):
+ if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
post_body = json.dumps({'server': kwargs})
diff --git a/tempest/lib/services/identity/__init__.py b/tempest/lib/services/identity/__init__.py
index e69de29..941a10e 100644
--- a/tempest/lib/services/identity/__init__.py
+++ b/tempest/lib/services/identity/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity import v2
+from tempest.lib.services.identity import v3
+
+__all__ = ['v2', 'v3']
diff --git a/tempest/lib/services/identity/v2/token_client.py b/tempest/lib/services/identity/v2/token_client.py
index a5d7c86..c4fd483 100644
--- a/tempest/lib/services/identity/v2/token_client.py
+++ b/tempest/lib/services/identity/v2/token_client.py
@@ -117,8 +117,8 @@
LOG = logging.getLogger(__name__)
def _warn(self):
- self.LOG.warning("%s class was deprecated and renamed to %s" %
- (self.__class__.__name__, 'TokenClient'))
+ self.LOG.warning("%s class was deprecated and renamed to %s",
+ self.__class__.__name__, 'TokenClient')
def __init__(self, *args, **kwargs):
self._warn()
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index e69de29..8058d51 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3.credentials_client import \
+ CredentialsClient
+from tempest.lib.services.identity.v3.domains_client import DomainsClient
+from tempest.lib.services.identity.v3.endpoints_client import EndPointsClient
+from tempest.lib.services.identity.v3.groups_client import GroupsClient
+from tempest.lib.services.identity.v3.identity_client import IdentityClient
+from tempest.lib.services.identity.v3.inherited_roles_client import \
+ InheritedRolesClient
+from tempest.lib.services.identity.v3.policies_client import PoliciesClient
+from tempest.lib.services.identity.v3.projects_client import ProjectsClient
+from tempest.lib.services.identity.v3.regions_client import RegionsClient
+from tempest.lib.services.identity.v3.role_assignments_client import \
+ RoleAssignmentsClient
+from tempest.lib.services.identity.v3.roles_client import RolesClient
+from tempest.lib.services.identity.v3.services_client import ServicesClient
+from tempest.lib.services.identity.v3.token_client import V3TokenClient
+from tempest.lib.services.identity.v3.trusts_client import TrustsClient
+from tempest.lib.services.identity.v3.users_client import UsersClient
+
+__all__ = ['CredentialsClient', 'DomainsClient', 'EndPointsClient',
+ 'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
+ 'PoliciesClient', 'ProjectsClient', 'RegionsClient',
+ 'RoleAssignmentsClient', 'RolesClient', 'ServicesClient',
+ 'V3TokenClient', 'TrustsClient', 'UsersClient', ]
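With the v3 clients now exported from tempest.lib, callers outside of Tempest can import them straight from the library namespace. A minimal sketch, assuming an already-built auth_provider and using placeholder service/region values:

    from tempest.lib.services import identity

    def build_users_client(auth_provider, region='RegionOne'):
        # identity.v3 exposes the clients listed in __all__ above; they are
        # plain tempest.lib rest clients, so the usual (auth_provider,
        # service, region) arguments apply.
        return identity.v3.UsersClient(auth_provider, 'identity', region)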
diff --git a/tempest/services/identity/v3/json/domains_client.py b/tempest/lib/services/identity/v3/domains_client.py
similarity index 100%
rename from tempest/services/identity/v3/json/domains_client.py
rename to tempest/lib/services/identity/v3/domains_client.py
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index c1f7e7b..06927f4 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -179,8 +179,8 @@
LOG = logging.getLogger(__name__)
def _warn(self):
- self.LOG.warning("%s class was deprecated and renamed to %s" %
- (self.__class__.__name__, 'V3TokenClient'))
+ self.LOG.warning("%s class was deprecated and renamed to %s",
+ self.__class__.__name__, 'V3TokenClient')
def __init__(self, *args, **kwargs):
self._warn()
diff --git a/tempest/lib/services/image/v1/images_client.py b/tempest/lib/services/image/v1/images_client.py
index e67a547..03f4c4b 100644
--- a/tempest/lib/services/image/v1/images_client.py
+++ b/tempest/lib/services/image/v1/images_client.py
@@ -115,7 +115,7 @@
if detail:
url += '/detail'
- if kwargs.get('changes_since'):
+ if 'changes_since' in kwargs:
kwargs['changes-since'] = kwargs.pop('changes_since')
if len(kwargs) > 0:
diff --git a/tempest/lib/services/image/v2/__init__.py b/tempest/lib/services/image/v2/__init__.py
index d359d4b..a35ce17 100644
--- a/tempest/lib/services/image/v2/__init__.py
+++ b/tempest/lib/services/image/v2/__init__.py
@@ -15,6 +15,8 @@
from tempest.lib.services.image.v2.image_members_client import \
ImageMembersClient
from tempest.lib.services.image.v2.images_client import ImagesClient
+from tempest.lib.services.image.v2.namespace_objects_client import \
+ NamespaceObjectsClient
from tempest.lib.services.image.v2.namespace_properties_client import \
NamespacePropertiesClient
from tempest.lib.services.image.v2.namespaces_client import NamespacesClient
@@ -22,5 +24,6 @@
ResourceTypesClient
from tempest.lib.services.image.v2.schemas_client import SchemasClient
-__all__ = ['ImageMembersClient', 'ImagesClient', 'NamespacePropertiesClient',
- 'NamespacesClient', 'ResourceTypesClient', 'SchemasClient']
+__all__ = ['ImageMembersClient', 'ImagesClient', 'NamespaceObjectsClient',
+ 'NamespacePropertiesClient', 'NamespacesClient',
+ 'ResourceTypesClient', 'SchemasClient']
diff --git a/tempest/lib/services/image/v2/namespace_objects_client.py b/tempest/lib/services/image/v2/namespace_objects_client.py
new file mode 100644
index 0000000..ac2e63e
--- /dev/null
+++ b/tempest/lib/services/image/v2/namespace_objects_client.py
@@ -0,0 +1,91 @@
+# Copyright 2016 EasyStack.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class NamespaceObjectsClient(rest_client.RestClient):
+ api_version = "v2"
+
+ def list_namespace_objects(self, namespace, **kwargs):
+ """Lists all namespace objects.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#list-objects
+ """
+ url = 'metadefs/namespaces/%s/objects' % namespace
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def create_namespace_object(self, namespace, **kwargs):
+ """Create a namespace object
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#create-object
+ """
+ url = 'metadefs/namespaces/%s/objects' % namespace
+ data = json.dumps(kwargs)
+ resp, body = self.post(url, data)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_namespace_object(self, namespace, object_name, **kwargs):
+ """Update a namespace object
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#update-object
+ """
+ url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
+ data = json.dumps(kwargs)
+ resp, body = self.put(url, data)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_namespace_object(self, namespace, object_name):
+ """Show a namespace object
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#show-object
+ """
+ url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_namespace_object(self, namespace, object_name):
+ """Delete a namespace object
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ http://developer.openstack.org/api-ref/image/v2/metadefs-index.html#delete-object
+ """
+ url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp)
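
For orientation, a hedged sketch of how the new client could be driven; the auth provider, region, namespace and object names below are placeholders, not values taken from the patch:

    from tempest.lib.services.image.v2 import namespace_objects_client

    # 'auth' is assumed to be an already-built tempest auth provider
    client = namespace_objects_client.NamespaceObjectsClient(
        auth, 'image', 'RegionOne')

    ns = 'OS::Compute::Hypervisor'               # illustrative namespace
    client.create_namespace_object(ns, name='my-object',
                                   description='example object')
    client.list_namespace_objects(ns, sort_key='name')
    client.update_namespace_object(ns, 'my-object', name='my-object',
                                   description='updated description')
    client.show_namespace_object(ns, 'my-object')
    client.delete_namespace_object(ns, 'my-object')
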
diff --git a/tempest/lib/services/volume/v2/__init__.py b/tempest/lib/services/volume/v2/__init__.py
index 837b4f6..8acad0f 100644
--- a/tempest/lib/services/volume/v2/__init__.py
+++ b/tempest/lib/services/volume/v2/__init__.py
@@ -27,6 +27,8 @@
from tempest.lib.services.volume.v2.scheduler_stats_client import \
SchedulerStatsClient
from tempest.lib.services.volume.v2.services_client import ServicesClient
+from tempest.lib.services.volume.v2.snapshot_manage_client import \
+ SnapshotManageClient
from tempest.lib.services.volume.v2.snapshots_client import SnapshotsClient
from tempest.lib.services.volume.v2.types_client import TypesClient
from tempest.lib.services.volume.v2.volumes_client import VolumesClient
@@ -34,4 +36,5 @@
__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'EncryptionTypesClient',
'ExtensionsClient', 'HostsClient', 'QosSpecsClient', 'QuotasClient',
'ServicesClient', 'SnapshotsClient', 'TypesClient', 'VolumesClient',
- 'LimitsClient', 'CapabilitiesClient', 'SchedulerStatsClient']
+ 'LimitsClient', 'CapabilitiesClient', 'SchedulerStatsClient',
+ 'SnapshotManageClient']
diff --git a/tempest/lib/services/volume/v2/snapshot_manage_client.py b/tempest/lib/services/volume/v2/snapshot_manage_client.py
new file mode 100644
index 0000000..aecd30b
--- /dev/null
+++ b/tempest/lib/services/volume/v2/snapshot_manage_client.py
@@ -0,0 +1,33 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class SnapshotManageClient(rest_client.RestClient):
+ """Snapshot manage V2 client."""
+
+ api_version = "v2"
+
+ def manage_snapshot(self, **kwargs):
+ """Manage a snapshot."""
+ post_body = json.dumps({'snapshot': kwargs})
+ url = 'os-snapshot-manage'
+ resp, body = self.post(url, post_body)
+ self.expected_success(202, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
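
The kwargs handed to manage_snapshot() become the 'snapshot' body of the POST to os-snapshot-manage. A hedged usage sketch, with the auth provider, service name, volume id and backend reference all placeholders:

    from tempest.lib.services.volume.v2 import snapshot_manage_client

    # 'auth' is assumed to be an already-built tempest auth provider
    client = snapshot_manage_client.SnapshotManageClient(
        auth, 'volumev2', 'RegionOne')

    snapshot = client.manage_snapshot(
        volume_id='<volume-uuid>',                       # volume owning the data
        ref={'source-name': '<backend-snapshot-name>'},  # driver-specific reference
        name='managed-snap')['snapshot']
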
diff --git a/tempest/lib/services/volume/v2/snapshots_client.py b/tempest/lib/services/volume/v2/snapshots_client.py
index 6f51b51..2bdf1b1 100644
--- a/tempest/lib/services/volume/v2/snapshots_client.py
+++ b/tempest/lib/services/volume/v2/snapshots_client.py
@@ -184,3 +184,11 @@
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def unmanage_snapshot(self, snapshot_id):
+ """Unmanage a snapshot."""
+ post_body = json.dumps({'os-unmanage': {}})
+ url = 'snapshots/%s/action' % (snapshot_id)
+ resp, body = self.post(url, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
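
unmanage_snapshot() is the inverse of the manage call added above: Cinder forgets the snapshot record while the backend data stays, so a test can unmanage a snapshot and then re-adopt it through SnapshotManageClient. Roughly, with client instances assumed to exist and a backend-specific source name as a placeholder:

    snapshots_client.unmanage_snapshot(snapshot['id'])
    snapshots_client.wait_for_resource_deletion(snapshot['id'])
    snapshot_manage_client.manage_snapshot(
        volume_id=volume['id'],
        ref={'source-name': '<backend-snapshot-name>'})
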
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 73544d9..2da2f92 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -20,7 +20,6 @@
from oslo_log import log
from oslo_serialization import jsonutils as json
from oslo_utils import netutils
-import six
from tempest.common import compute
from tempest.common import image as common_image
@@ -157,7 +156,7 @@
# Convert security group names to security group ids
# to pass to create_port
if 'security_groups' in kwargs:
- security_groups =\
+ security_groups = \
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
@@ -380,9 +379,9 @@
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
- "properties: %s, ami: %s, ari: %s, aki: %s" %
- (img_path, img_container_format, img_disk_format,
- img_properties, ami_img_path, ari_img_path, aki_img_path))
+ "properties: %s, ami: %s, ari: %s, aki: %s",
+ img_path, img_container_format, img_disk_format,
+ img_properties, ami_img_path, ari_img_path, aki_img_path)
try:
image = self._image_create('scenario-img',
img_container_format,
@@ -397,7 +396,7 @@
image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
- LOG.debug("image:%s" % image)
+ LOG.debug("image:%s", image)
return image
@@ -530,14 +529,14 @@
caller = test_utils.find_test_caller()
LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
- ' expected result is %(should_succeed)s' % {
+ ' expected result is %(should_succeed)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'should_succeed':
'reachable' if should_succeed else 'unreachable'
})
result = test_utils.call_until_true(ping, timeout, 1)
LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
- 'ping result is %(result)s' % {
+ 'ping result is %(result)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
@@ -578,8 +577,8 @@
msg=None, servers=None, mtu=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s' %
- (ip_address, username))
+ LOG.debug('checking network connections to IP %s with user: %s',
+ ip_address, username)
try:
self.check_vm_connectivity(ip_address,
username,
@@ -817,7 +816,7 @@
# servers. Neutron does not bind ports for Ironic instances, as a
# result the port remains in the DOWN state.
# TODO(vsaienko) remove once bug: #1599836 is resolved.
- if CONF.service_available.ironic:
+ if getattr(CONF.service_available, 'ironic', False):
p_status.append('DOWN')
port_map = [(p["id"], fxip["ip_address"])
for p in ports
@@ -920,7 +919,7 @@
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
try:
- for net_name, ip_addresses in six.iteritems(server['addresses']):
+ for net_name, ip_addresses in server['addresses'].items():
for ip_address in ip_addresses:
self.check_vm_connectivity(ip_address['addr'],
username,
@@ -948,7 +947,7 @@
source.ping_host(dest, nic=nic)
except lib_exc.SSHExecCommandFailed:
LOG.warning('Failed to ping IP: %s via a ssh connection '
- 'from: %s.' % (dest, source.ssh_client.host))
+ 'from: %s.', dest, source.ssh_client.host)
return not should_succeed
return should_succeed
@@ -1189,7 +1188,7 @@
:param dns_nameservers: list of dns servers to send to subnet.
:returns: network, subnet, router
"""
- if CONF.baremetal.driver_enabled:
+ if CONF.network.shared_physical_network:
# NOTE(Shrews): This exception is for environments where tenant
# credential isolation is available, but network separation is
# not (the current baremetal case). Likely can be removed when
@@ -1230,151 +1229,6 @@
return network, subnet, router
-# power/provision states as of icehouse
-class BaremetalPowerStates(object):
- """Possible power states of an Ironic node."""
- POWER_ON = 'power on'
- POWER_OFF = 'power off'
- REBOOT = 'rebooting'
- SUSPEND = 'suspended'
-
-
-class BaremetalProvisionStates(object):
- """Possible provision states of an Ironic node."""
- NOSTATE = None
- INIT = 'initializing'
- ACTIVE = 'active'
- BUILDING = 'building'
- DEPLOYWAIT = 'wait call-back'
- DEPLOYING = 'deploying'
- DEPLOYFAIL = 'deploy failed'
- DEPLOYDONE = 'deploy complete'
- DELETING = 'deleting'
- DELETED = 'deleted'
- ERROR = 'error'
-
-
-class BaremetalScenarioTest(ScenarioTest):
-
- credentials = ['primary', 'admin']
-
- @classmethod
- def skip_checks(cls):
- super(BaremetalScenarioTest, cls).skip_checks()
- if (not CONF.service_available.ironic or
- not CONF.baremetal.driver_enabled):
- msg = 'Ironic not available or Ironic compute driver not enabled'
- raise cls.skipException(msg)
-
- @classmethod
- def setup_clients(cls):
- super(BaremetalScenarioTest, cls).setup_clients()
-
- cls.baremetal_client = cls.admin_manager.baremetal_client
-
- @classmethod
- def resource_setup(cls):
- super(BaremetalScenarioTest, cls).resource_setup()
- # allow any issues obtaining the node list to raise early
- cls.baremetal_client.list_nodes()
-
- def _node_state_timeout(self, node_id, state_attr,
- target_states, timeout=10, interval=1):
- if not isinstance(target_states, list):
- target_states = [target_states]
-
- def check_state():
- node = self.get_node(node_id=node_id)
- if node.get(state_attr) in target_states:
- return True
- return False
-
- if not test_utils.call_until_true(
- check_state, timeout, interval):
- msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
- (node_id, state_attr, target_states))
- raise lib_exc.TimeoutException(msg)
-
- def wait_provisioning_state(self, node_id, state, timeout):
- self._node_state_timeout(
- node_id=node_id, state_attr='provision_state',
- target_states=state, timeout=timeout)
-
- def wait_power_state(self, node_id, state):
- self._node_state_timeout(
- node_id=node_id, state_attr='power_state',
- target_states=state, timeout=CONF.baremetal.power_timeout)
-
- def wait_node(self, instance_id):
- """Waits for a node to be associated with instance_id."""
-
- def _get_node():
- node = test_utils.call_and_ignore_notfound_exc(
- self.get_node, instance_id=instance_id)
- return node is not None
-
- if not test_utils.call_until_true(
- _get_node, CONF.baremetal.association_timeout, 1):
- msg = ('Timed out waiting to get Ironic node by instance id %s'
- % instance_id)
- raise lib_exc.TimeoutException(msg)
-
- def get_node(self, node_id=None, instance_id=None):
- if node_id:
- _, body = self.baremetal_client.show_node(node_id)
- return body
- elif instance_id:
- _, body = self.baremetal_client.show_node_by_instance_uuid(
- instance_id)
- if body['nodes']:
- return body['nodes'][0]
-
- def get_ports(self, node_uuid):
- ports = []
- _, body = self.baremetal_client.list_node_ports(node_uuid)
- for port in body['ports']:
- _, p = self.baremetal_client.show_port(port['uuid'])
- ports.append(p)
- return ports
-
- def add_keypair(self):
- self.keypair = self.create_keypair()
-
- def boot_instance(self):
- self.instance = self.create_server(
- key_name=self.keypair['name'])
-
- self.wait_node(self.instance['id'])
- self.node = self.get_node(instance_id=self.instance['id'])
-
- self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
-
- self.wait_provisioning_state(
- self.node['uuid'],
- [BaremetalProvisionStates.DEPLOYWAIT,
- BaremetalProvisionStates.ACTIVE],
- timeout=15)
-
- self.wait_provisioning_state(self.node['uuid'],
- BaremetalProvisionStates.ACTIVE,
- timeout=CONF.baremetal.active_timeout)
-
- waiters.wait_for_server_status(self.servers_client,
- self.instance['id'], 'ACTIVE')
- self.node = self.get_node(instance_id=self.instance['id'])
- self.instance = (self.servers_client.show_server(self.instance['id'])
- ['server'])
-
- def terminate_instance(self):
- self.servers_client.delete_server(self.instance['id'])
- self.wait_power_state(self.node['uuid'],
- BaremetalPowerStates.POWER_OFF)
- self.wait_provisioning_state(
- self.node['uuid'],
- BaremetalProvisionStates.NOSTATE,
- timeout=CONF.baremetal.unprovision_timeout)
-
-
class EncryptionScenarioTest(ScenarioTest):
"""Base class for encryption scenario tests"""
@@ -1460,7 +1314,7 @@
self.container_client.create_container(name)
# look for the container to assure it is created
self.list_and_check_container_objects(name)
- LOG.debug('Container %s created' % (name))
+ LOG.debug('Container %s created', name)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.container_client.delete_container,
name)
@@ -1468,7 +1322,7 @@
def delete_container(self, container_name):
self.container_client.delete_container(container_name)
- LOG.debug('Container %s deleted' % (container_name))
+ LOG.debug('Container %s deleted', container_name)
def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
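
All of the logging edits in this file follow the same rule: pass the format arguments to the logger instead of pre-formatting with %, so interpolation only happens when a handler actually emits the record. Compact before/after:

    # before: the string is built even when DEBUG is disabled
    LOG.debug("Container %s created" % name)

    # after: interpolation is deferred to the logging handler
    LOG.debug("Container %s created", name)
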
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
deleted file mode 100644
index 45c38f6..0000000
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from tempest.scenario import manager
-from tempest import test
-
-LOG = logging.getLogger(__name__)
-
-
-class BaremetalBasicOps(manager.BaremetalScenarioTest):
- """This test tests the pxe_ssh Ironic driver.
-
- It follows this basic set of operations:
- * Creates a keypair
- * Boots an instance using the keypair
- * Monitors the associated Ironic node for power and
- expected state transitions
- * Validates Ironic node's port data has been properly updated
- * Verifies SSH connectivity using created keypair via fixed IP
- * Associates a floating ip
- * Verifies SSH connectivity using created keypair via floating IP
- * Deletes instance
- * Monitors the associated Ironic node for power and
- expected state transitions
- """
- def verify_partition(self, client, label, mount, gib_size):
- """Verify a labeled partition's mount point and size."""
- LOG.info("Looking for partition %s mounted on %s" % (label, mount))
-
- # Validate we have a device with the given partition label
- cmd = "/sbin/blkid | grep '%s' | cut -d':' -f1" % label
- device = client.exec_command(cmd).rstrip('\n')
- LOG.debug("Partition device is %s" % device)
- self.assertNotEqual('', device)
-
- # Validate the mount point for the device
- cmd = "mount | grep '%s' | cut -d' ' -f3" % device
- actual_mount = client.exec_command(cmd).rstrip('\n')
- LOG.debug("Partition mount point is %s" % actual_mount)
- self.assertEqual(actual_mount, mount)
-
- # Validate the partition size matches what we expect
- numbers = '0123456789'
- devnum = device.replace('/dev/', '')
- cmd = "cat /sys/block/%s/%s/size" % (devnum.rstrip(numbers), devnum)
- num_bytes = client.exec_command(cmd).rstrip('\n')
- num_bytes = int(num_bytes) * 512
- actual_gib_size = num_bytes / (1024 * 1024 * 1024)
- LOG.debug("Partition size is %d GiB" % actual_gib_size)
- self.assertEqual(actual_gib_size, gib_size)
-
- def get_flavor_ephemeral_size(self):
- """Returns size of the ephemeral partition in GiB."""
- f_id = self.instance['flavor']['id']
- flavor = self.flavors_client.show_flavor(f_id)['flavor']
- ephemeral = flavor.get('OS-FLV-EXT-DATA:ephemeral')
- if not ephemeral or ephemeral == 'N/A':
- return None
- return int(ephemeral)
-
- def validate_ports(self):
- for port in self.get_ports(self.node['uuid']):
- n_port_id = port['extra']['vif_port_id']
- body = self.ports_client.show_port(n_port_id)
- n_port = body['port']
- self.assertEqual(n_port['device_id'], self.instance['id'])
- self.assertEqual(n_port['mac_address'], port['address'])
-
- @test.idempotent_id('549173a5-38ec-42bb-b0e2-c8b9f4a08943')
- @test.services('baremetal', 'compute', 'image', 'network')
- def test_baremetal_server_ops(self):
- self.add_keypair()
- self.boot_instance()
- self.validate_ports()
- ip_address = self.get_server_ip(self.instance)
- self.get_remote_client(ip_address).validate_authentication()
- vm_client = self.get_remote_client(ip_address)
-
- # We expect the ephemeral partition to be mounted on /mnt and to have
- # the same size as our flavor definition.
- eph_size = self.get_flavor_ephemeral_size()
- if eph_size:
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
- # Create the test file
- self.create_timestamp(
- ip_address, private_key=self.keypair['private_key'])
-
- self.terminate_instance()
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 0605902..1279484 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -198,12 +198,11 @@
@test.idempotent_id('a4858f6c-401e-4155-9a49-d5cd053d1a2f')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
@test.services('compute', 'network')
def test_server_connectivity_cold_migration(self):
- if CONF.compute.min_compute_nodes < 2:
- msg = "Less than 2 compute nodes, skipping multinode tests."
- raise self.skipException(msg)
-
keypair = self.create_keypair()
server = self._setup_server(keypair)
floating_ip = self._setup_network(server, keypair)
@@ -220,3 +219,28 @@
dst_host = self._get_host_for_server(server['id'])
self.assertNotEqual(src_host, dst_host)
+
+ @test.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @test.services('compute', 'network')
+ def test_server_connectivity_cold_migration_revert(self):
+ keypair = self.create_keypair()
+ server = self._setup_server(keypair)
+ floating_ip = self._setup_network(server, keypair)
+ src_host = self._get_host_for_server(server['id'])
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ self.admin_servers_client.migrate_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client, server['id'],
+ 'VERIFY_RESIZE')
+ self.servers_client.revert_resize_server(server['id'])
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+ dst_host = self._get_host_for_server(server['id'])
+
+ self.assertEqual(src_host, dst_host)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 4a076e4..f9aa3e7 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -417,8 +417,9 @@
should_connect=True, mtu=self.network['mtu'])
@test.idempotent_id('1546850e-fbaa-42f5-8b5f-03d8a6a95f15')
- @testtools.skipIf(CONF.baremetal.driver_enabled,
- 'Baremetal relies on a shared physical network.')
+ @testtools.skipIf(CONF.network.shared_physical_network,
+ 'Connectivity can only be tested when in a '
+ 'multitenant network environment')
@decorators.skip_because(bug="1610994")
@test.services('compute', 'network')
def test_connectivity_between_vms_on_different_networks(self):
@@ -492,9 +493,9 @@
self._check_network_internal_connectivity(network=self.new_net)
@test.idempotent_id('04b9fe4e-85e8-4aea-b937-ea93885ac59f')
- @testtools.skipIf(CONF.baremetal.driver_enabled,
- 'Router state cannot be altered on a shared baremetal '
- 'network')
+ @testtools.skipIf(CONF.network.shared_physical_network,
+ 'Router state can be altered only with multitenant '
+ 'networks capabilities')
@test.services('compute', 'network')
def test_update_router_admin_state(self):
"""Test to update admin state up of router
@@ -524,8 +525,8 @@
"admin_state_up of router to True")
@test.idempotent_id('d8bb918e-e2df-48b2-97cd-b73c95450980')
- @testtools.skipIf(CONF.baremetal.driver_enabled,
- 'network isolation not available for baremetal nodes')
+ @testtools.skipIf(CONF.network.shared_physical_network,
+ 'network isolation not available')
@testtools.skipUnless(CONF.scenario.dhcp_client,
"DHCP client is not available.")
@test.services('compute', 'network')
@@ -607,9 +608,6 @@
"new DNS nameservers")
@test.idempotent_id('f5dfcc22-45fd-409f-954c-5bd500d7890b')
- @testtools.skipIf(CONF.baremetal.driver_enabled,
- 'admin_state of instance ports cannot be altered '
- 'for baremetal nodes')
@testtools.skipUnless(CONF.network_feature_enabled.port_admin_state_change,
"Changing a port's admin state is not supported "
"by the test environment")
@@ -617,29 +615,44 @@
def test_update_instance_port_admin_state(self):
"""Test to update admin_state_up attribute of instance port
- 1. Check public connectivity before updating
+ 1. Check public and project connectivity before updating
admin_state_up attribute of instance port to False
- 2. Check public connectivity after updating
+ 2. Check public and project connectivity after updating
admin_state_up attribute of instance port to False
- 3. Check public connectivity after updating
+ 3. Check public and project connectivity after updating
admin_state_up attribute of instance port to True
"""
self._setup_network_and_servers()
floating_ip, server = self.floating_ip_tuple
server_id = server['id']
port_id = self._list_ports(device_id=server_id)[0]['id']
+ server_pip = server['addresses'][self.network['name']][0]['addr']
+
+ server2 = self._create_server(self.network)
+ server2_fip = self.create_floating_ip(server2)
+
+ private_key = self._get_server_key(server2)
+ ssh_client = self.get_remote_client(server2_fip['floating_ip_address'],
+ private_key=private_key)
+
self.check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of instance port to False")
+ self._check_remote_connectivity(ssh_client, dest=server_pip,
+ should_succeed=True)
self.ports_client.update_port(port_id, admin_state_up=False)
self.check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of instance port to False",
should_check_floating_ip_status=False)
+ self._check_remote_connectivity(ssh_client, dest=server_pip,
+ should_succeed=False)
self.ports_client.update_port(port_id, admin_state_up=True)
self.check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of instance port to True")
+ self._check_remote_connectivity(ssh_client, dest=server_pip,
+ should_succeed=True)
@test.idempotent_id('759462e1-8535-46b0-ab3a-33aa45c55aaa')
@test.services('compute', 'network')
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 6700236..7acf107 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -14,8 +14,6 @@
# under the License.
import functools
-import six
-
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.scenario import manager
@@ -50,8 +48,8 @@
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
- if CONF.baremetal.driver_enabled:
- msg = ('Baremetal does not currently support network isolation')
+ if CONF.network.shared_physical_network:
+ msg = 'Deployment uses a shared physical network'
raise cls.skipException(msg)
@classmethod
@@ -112,7 +110,7 @@
@staticmethod
def define_server_ips(srv):
ips = {'4': None, '6': []}
- for net_name, nics in six.iteritems(srv['addresses']):
+ for net_name, nics in srv['addresses'].items():
for nic in nics:
if nic['version'] == 6:
ips['6'].append(nic['addr'])
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 1360b09..f8c5c0a 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -130,9 +130,6 @@
@classmethod
def skip_checks(cls):
super(TestSecurityGroupsBasicOps, cls).skip_checks()
- if CONF.baremetal.driver_enabled:
- msg = ('Not currently supported by baremetal.')
- raise cls.skipException(msg)
if CONF.network.port_vnic_type in ['direct', 'macvtap']:
msg = ('Not currently supported when using vnic_type'
' direct or macvtap')
@@ -145,6 +142,10 @@
if not test.is_extension_enabled('security-group', 'network'):
msg = "security-group extension not enabled."
raise cls.skipException(msg)
+ if CONF.network.shared_physical_network:
+ msg = ('Deployment uses a shared physical network, security '
+ 'groups not supported')
+ raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index dff00e7..b10be11 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -86,7 +86,7 @@
def _func():
disks = ssh.get_disks()
- LOG.debug("Disks: %s" % disks)
+ LOG.debug("Disks: %s", disks)
return CONF.compute.volume_device_name in disks
if not test_utils.call_until_true(_func,
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 46aebfe..2c8b618 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -118,36 +118,36 @@
volume_origin = self._create_volume_from_image()
instance_1st = self._boot_instance_from_volume(volume_origin['id'],
keypair, security_group)
- LOG.info("Booted first instance: %s" % instance_1st)
+ LOG.info("Booted first instance: %s", instance_1st)
# write content to volume on instance
- LOG.info("Setting timestamp in instance %s" % instance_1st)
+ LOG.info("Setting timestamp in instance %s", instance_1st)
ip_instance_1st = self.get_server_ip(instance_1st)
timestamp = self.create_timestamp(ip_instance_1st,
private_key=keypair['private_key'])
# delete instance
- LOG.info("Deleting first instance: %s" % instance_1st)
+ LOG.info("Deleting first instance: %s", instance_1st)
self._delete_server(instance_1st)
# create a 2nd instance from volume
instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
keypair, security_group)
- LOG.info("Booted second instance %s" % instance_2nd)
+ LOG.info("Booted second instance %s", instance_2nd)
# check the content of written file
- LOG.info("Getting timestamp in instance %s" % instance_2nd)
+ LOG.info("Getting timestamp in instance %s", instance_2nd)
ip_instance_2nd = self.get_server_ip(instance_2nd)
timestamp2 = self.get_timestamp(ip_instance_2nd,
private_key=keypair['private_key'])
self.assertEqual(timestamp, timestamp2)
# snapshot a volume
- LOG.info("Creating snapshot from volume: %s" % volume_origin['id'])
+ LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
snapshot = self._create_snapshot_from_volume(volume_origin['id'])
# create a 3rd instance from snapshot
- LOG.info("Creating third instance from snapshot: %s" % snapshot['id'])
+ LOG.info("Creating third instance from snapshot: %s", snapshot['id'])
volume = self.create_volume(snapshot_id=snapshot['id'],
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
@@ -157,7 +157,7 @@
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
- LOG.info("Logging into third instance to get timestamp: %s" %
+ LOG.info("Logging into third instance to get timestamp: %s",
server_from_snapshot)
server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
timestamp3 = self.get_timestamp(server_from_snapshot_ip,
diff --git a/tempest/services/baremetal/__init__.py b/tempest/services/baremetal/__init__.py
deleted file mode 100644
index 390f40a..0000000
--- a/tempest/services/baremetal/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.services.baremetal.v1.json.baremetal_client import \
- BaremetalClient
-
-__all__ = ['BaremetalClient']
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
deleted file mode 100644
index 2bdd092..0000000
--- a/tempest/services/baremetal/base.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-
-from oslo_serialization import jsonutils as json
-import six
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-def handle_errors(f):
- """A decorator that allows to ignore certain types of errors."""
-
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- param_name = 'ignore_errors'
- ignored_errors = kwargs.get(param_name, tuple())
-
- if param_name in kwargs:
- del kwargs[param_name]
-
- try:
- return f(*args, **kwargs)
- except ignored_errors:
- # Silently ignore errors
- pass
-
- return wrapper
-
-
-class BaremetalClient(rest_client.RestClient):
- """Base Tempest REST client for Ironic API."""
-
- uri_prefix = ''
-
- def serialize(self, object_dict):
- """Serialize an Ironic object."""
-
- return json.dumps(object_dict)
-
- def deserialize(self, object_str):
- """Deserialize an Ironic object."""
-
- return json.loads(object_str)
-
- def _get_uri(self, resource_name, uuid=None, permanent=False):
- """Get URI for a specific resource or object.
-
- :param resource_name: The name of the REST resource, e.g., 'nodes'.
- :param uuid: The unique identifier of an object in UUID format.
- :returns: Relative URI for the resource or object.
-
- """
- prefix = self.uri_prefix if not permanent else ''
-
- return '{pref}/{res}{uuid}'.format(pref=prefix,
- res=resource_name,
- uuid='/%s' % uuid if uuid else '')
-
- def _make_patch(self, allowed_attributes, **kwargs):
- """Create a JSON patch according to RFC 6902.
-
- :param allowed_attributes: An iterable object that contains a set of
- allowed attributes for an object.
- :param **kwargs: Attributes and new values for them.
- :returns: A JSON path that sets values of the specified attributes to
- the new ones.
-
- """
- def get_change(kwargs, path='/'):
- for name, value in six.iteritems(kwargs):
- if isinstance(value, dict):
- for ch in get_change(value, path + '%s/' % name):
- yield ch
- else:
- if value is None:
- yield {'path': path + name,
- 'op': 'remove'}
- else:
- yield {'path': path + name,
- 'value': value,
- 'op': 'replace'}
-
- patch = [ch for ch in get_change(kwargs)
- if ch['path'].lstrip('/') in allowed_attributes]
-
- return patch
-
- def _list_request(self, resource, permanent=False, **kwargs):
- """Get the list of objects of the specified type.
-
- :param resource: The name of the REST resource, e.g., 'nodes'.
- :param **kwargs: Parameters for the request.
- :returns: A tuple with the server response and deserialized JSON list
- of objects
-
- """
- uri = self._get_uri(resource, permanent=permanent)
- if kwargs:
- uri += "?%s" % urllib.urlencode(kwargs)
-
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
-
- return resp, self.deserialize(body)
-
- def _show_request(self, resource, uuid, permanent=False, **kwargs):
- """Gets a specific object of the specified type.
-
- :param uuid: Unique identifier of the object in UUID format.
- :returns: Serialized object as a dictionary.
-
- """
- if 'uri' in kwargs:
- uri = kwargs['uri']
- else:
- uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
-
- return resp, self.deserialize(body)
-
- def _create_request(self, resource, object_dict):
- """Create an object of the specified type.
-
- :param resource: The name of the REST resource, e.g., 'nodes'.
- :param object_dict: A Python dict that represents an object of the
- specified type.
- :returns: A tuple with the server response and the deserialized created
- object.
-
- """
- body = self.serialize(object_dict)
- uri = self._get_uri(resource)
-
- resp, body = self.post(uri, body=body)
- self.expected_success(201, resp.status)
-
- return resp, self.deserialize(body)
-
- def _delete_request(self, resource, uuid):
- """Delete specified object.
-
- :param resource: The name of the REST resource, e.g., 'nodes'.
- :param uuid: The unique identifier of an object in UUID format.
- :returns: A tuple with the server response and the response body.
-
- """
- uri = self._get_uri(resource, uuid)
-
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return resp, body
-
- def _patch_request(self, resource, uuid, patch_object):
- """Update specified object with JSON-patch.
-
- :param resource: The name of the REST resource, e.g., 'nodes'.
- :param uuid: The unique identifier of an object in UUID format.
- :returns: A tuple with the server response and the serialized patched
- object.
-
- """
- uri = self._get_uri(resource, uuid)
- patch_body = json.dumps(patch_object)
-
- resp, body = self.patch(uri, body=patch_body)
- self.expected_success(200, resp.status)
- return resp, self.deserialize(body)
-
- @handle_errors
- def get_api_description(self):
- """Retrieves all versions of the Ironic API."""
-
- return self._list_request('', permanent=True)
-
- @handle_errors
- def get_version_description(self, version='v1'):
- """Retrieves the desctription of the API.
-
- :param version: The version of the API. Default: 'v1'.
- :returns: Serialized description of API resources.
-
- """
- return self._list_request(version, permanent=True)
-
- def _put_request(self, resource, put_object):
- """Update specified object with JSON-patch."""
- uri = self._get_uri(resource)
- put_body = json.dumps(put_object)
-
- resp, body = self.put(uri, body=put_body)
- self.expected_success([202, 204], resp.status)
- return resp, body
diff --git a/tempest/services/baremetal/v1/__init__.py b/tempest/services/baremetal/v1/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/baremetal/v1/__init__.py
+++ /dev/null
diff --git a/tempest/services/baremetal/v1/json/__init__.py b/tempest/services/baremetal/v1/json/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/baremetal/v1/json/__init__.py
+++ /dev/null
diff --git a/tempest/services/baremetal/v1/json/baremetal_client.py b/tempest/services/baremetal/v1/json/baremetal_client.py
deleted file mode 100644
index 7405871..0000000
--- a/tempest/services/baremetal/v1/json/baremetal_client.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.baremetal import base
-
-
-class BaremetalClient(base.BaremetalClient):
- """Base Tempest REST client for Ironic API v1."""
- version = '1'
- uri_prefix = 'v1'
-
- @base.handle_errors
- def list_nodes(self, **kwargs):
- """List all existing nodes.
-
- Available params: see http://developer.openstack.org/api-ref/
- baremetal/index.html#list-nodes
- """
- return self._list_request('nodes', **kwargs)
-
- @base.handle_errors
- def list_chassis(self):
- """List all existing chassis."""
- return self._list_request('chassis')
-
- @base.handle_errors
- def list_chassis_nodes(self, chassis_uuid):
- """List all nodes associated with a chassis."""
- return self._list_request('/chassis/%s/nodes' % chassis_uuid)
-
- @base.handle_errors
- def list_ports(self, **kwargs):
- """List all existing ports.
-
- Available params: see http://developer.openstack.org/api-ref/
- baremetal/index.html?expanded=#list-ports
- """
- return self._list_request('ports', **kwargs)
-
- @base.handle_errors
- def list_node_ports(self, uuid):
- """List all ports associated with the node."""
- return self._list_request('/nodes/%s/ports' % uuid)
-
- @base.handle_errors
- def list_nodestates(self, uuid):
- """List all existing states."""
- return self._list_request('/nodes/%s/states' % uuid)
-
- @base.handle_errors
- def list_ports_detail(self, **kwargs):
- """Details list all existing ports.
-
- Available params: see http://developer.openstack.org/api-ref/baremetal/
- index.html?expanded=#list-detailed-ports
- """
- return self._list_request('/ports/detail', **kwargs)
-
- @base.handle_errors
- def list_drivers(self):
- """List all existing drivers."""
- return self._list_request('drivers')
-
- @base.handle_errors
- def show_node(self, uuid):
- """Gets a specific node.
-
- :param uuid: Unique identifier of the node in UUID format.
- :return: Serialized node as a dictionary.
-
- """
- return self._show_request('nodes', uuid)
-
- @base.handle_errors
- def show_node_by_instance_uuid(self, instance_uuid):
- """Gets a node associated with given instance uuid.
-
- :param instance_uuid: Unique identifier of the instance in UUID format.
- :return: Serialized node as a dictionary.
-
- """
- uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
-
- return self._show_request('nodes',
- uuid=None,
- uri=uri)
-
- @base.handle_errors
- def show_chassis(self, uuid):
- """Gets a specific chassis.
-
- :param uuid: Unique identifier of the chassis in UUID format.
- :return: Serialized chassis as a dictionary.
-
- """
- return self._show_request('chassis', uuid)
-
- @base.handle_errors
- def show_port(self, uuid):
- """Gets a specific port.
-
- :param uuid: Unique identifier of the port in UUID format.
- :return: Serialized port as a dictionary.
-
- """
- return self._show_request('ports', uuid)
-
- @base.handle_errors
- def show_port_by_address(self, address):
- """Gets a specific port by address.
-
- :param address: MAC address of the port.
- :return: Serialized port as a dictionary.
-
- """
- uri = '/ports/detail?address=%s' % address
-
- return self._show_request('ports', uuid=None, uri=uri)
-
- def show_driver(self, driver_name):
- """Gets a specific driver.
-
- :param driver_name: Name of driver.
- :return: Serialized driver as a dictionary.
- """
- return self._show_request('drivers', driver_name)
-
- @base.handle_errors
- def create_node(self, chassis_id=None, **kwargs):
- """Create a baremetal node with the specified parameters.
-
- :param chassis_id: The unique identifier of the chassis.
- :param cpu_arch: CPU architecture of the node. Default: x86_64.
- :param cpus: Number of CPUs. Default: 8.
- :param local_gb: Disk size. Default: 1024.
- :param memory_mb: Available RAM. Default: 4096.
- :param driver: Driver name. Default: "fake"
- :return: A tuple with the server response and the created node.
-
- """
- node = {'chassis_uuid': chassis_id,
- 'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
- 'cpus': kwargs.get('cpus', 8),
- 'local_gb': kwargs.get('local_gb', 1024),
- 'memory_mb': kwargs.get('memory_mb', 4096)},
- 'driver': kwargs.get('driver', 'fake')}
-
- return self._create_request('nodes', node)
-
- @base.handle_errors
- def create_chassis(self, **kwargs):
- """Create a chassis with the specified parameters.
-
- :param description: The description of the chassis.
- Default: test-chassis
- :return: A tuple with the server response and the created chassis.
-
- """
- chassis = {'description': kwargs.get('description', 'test-chassis')}
-
- return self._create_request('chassis', chassis)
-
- @base.handle_errors
- def create_port(self, node_id, **kwargs):
- """Create a port with the specified parameters.
-
- :param node_id: The ID of the node which owns the port.
- :param address: MAC address of the port.
- :param extra: Meta data of the port. Default: {'foo': 'bar'}.
- :param uuid: UUID of the port.
- :return: A tuple with the server response and the created port.
-
- """
- port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
- 'uuid': kwargs['uuid']}
-
- if node_id is not None:
- port['node_uuid'] = node_id
-
- if kwargs['address'] is not None:
- port['address'] = kwargs['address']
-
- return self._create_request('ports', port)
-
- @base.handle_errors
- def delete_node(self, uuid):
- """Deletes a node having the specified UUID.
-
- :param uuid: The unique identifier of the node.
- :return: A tuple with the server response and the response body.
-
- """
- return self._delete_request('nodes', uuid)
-
- @base.handle_errors
- def delete_chassis(self, uuid):
- """Deletes a chassis having the specified UUID.
-
- :param uuid: The unique identifier of the chassis.
- :return: A tuple with the server response and the response body.
-
- """
- return self._delete_request('chassis', uuid)
-
- @base.handle_errors
- def delete_port(self, uuid):
- """Deletes a port having the specified UUID.
-
- :param uuid: The unique identifier of the port.
- :return: A tuple with the server response and the response body.
-
- """
- return self._delete_request('ports', uuid)
-
- @base.handle_errors
- def update_node(self, uuid, **kwargs):
- """Update the specified node.
-
- :param uuid: The unique identifier of the node.
- :return: A tuple with the server response and the updated node.
-
- """
- node_attributes = ('properties/cpu_arch',
- 'properties/cpus',
- 'properties/local_gb',
- 'properties/memory_mb',
- 'driver',
- 'instance_uuid')
-
- patch = self._make_patch(node_attributes, **kwargs)
-
- return self._patch_request('nodes', uuid, patch)
-
- @base.handle_errors
- def update_chassis(self, uuid, **kwargs):
- """Update the specified chassis.
-
- :param uuid: The unique identifier of the chassis.
- :return: A tuple with the server response and the updated chassis.
-
- """
- chassis_attributes = ('description',)
- patch = self._make_patch(chassis_attributes, **kwargs)
-
- return self._patch_request('chassis', uuid, patch)
-
- @base.handle_errors
- def update_port(self, uuid, patch):
- """Update the specified port.
-
- :param uuid: The unique identifier of the port.
- :param patch: List of dicts representing json patches.
- :return: A tuple with the server response and the updated port.
-
- """
-
- return self._patch_request('ports', uuid, patch)
-
- @base.handle_errors
- def set_node_power_state(self, node_uuid, state):
- """Set power state of the specified node.
-
- :param node_uuid: The unique identifier of the node.
- :param state: desired state to set (on/off/reboot).
-
- """
- target = {'target': state}
- return self._put_request('nodes/%s/states/power' % node_uuid,
- target)
-
- @base.handle_errors
- def validate_driver_interface(self, node_uuid):
- """Get all driver interfaces of a specific node.
-
- :param node_uuid: Unique identifier of the node in UUID format.
-
- """
-
- uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
- res='nodes',
- uuid=node_uuid,
- postf='validate')
-
- return self._show_request('nodes', node_uuid, uri=uri)
-
- @base.handle_errors
- def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
- """Set the boot device of the specified node.
-
- :param node_uuid: The unique identifier of the node.
- :param boot_device: The boot device name.
- :param persistent: Boolean value. True if the boot device will
- persist to all future boots, False if not.
- Default: False.
-
- """
- request = {'boot_device': boot_device, 'persistent': persistent}
- resp, body = self._put_request('nodes/%s/management/boot_device' %
- node_uuid, request)
- self.expected_success(204, resp.status)
- return body
-
- @base.handle_errors
- def get_node_boot_device(self, node_uuid):
- """Get the current boot device of the specified node.
-
- :param node_uuid: The unique identifier of the node.
-
- """
- path = 'nodes/%s/management/boot_device' % node_uuid
- resp, body = self._list_request(path)
- self.expected_success(200, resp.status)
- return body
-
- @base.handle_errors
- def get_node_supported_boot_devices(self, node_uuid):
- """Get the supported boot devices of the specified node.
-
- :param node_uuid: The unique identifier of the node.
-
- """
- path = 'nodes/%s/management/boot_device/supported' % node_uuid
- resp, body = self._list_request(path)
- self.expected_success(200, resp.status)
- return body
-
- @base.handle_errors
- def get_console(self, node_uuid):
- """Get connection information about the console.
-
- :param node_uuid: Unique identifier of the node in UUID format.
-
- """
-
- resp, body = self._show_request('nodes/states/console', node_uuid)
- self.expected_success(200, resp.status)
- return resp, body
-
- @base.handle_errors
- def set_console_mode(self, node_uuid, enabled):
- """Start and stop the node console.
-
- :param node_uuid: Unique identifier of the node in UUID format.
- :param enabled: Boolean value; whether to enable or disable the
- console.
-
- """
-
- enabled = {'enabled': enabled}
- resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
- enabled)
- self.expected_success(202, resp.status)
- return resp, body
diff --git a/tempest/services/identity/__init__.py b/tempest/services/identity/__init__.py
deleted file mode 100644
index 53c223f..0000000
--- a/tempest/services/identity/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.lib.services.identity import v2
-from tempest.services.identity import v3
-
-__all__ = ['v2', 'v3']
diff --git a/tempest/services/identity/v3/__init__.py b/tempest/services/identity/v3/__init__.py
deleted file mode 100644
index 6e64a7d..0000000
--- a/tempest/services/identity/v3/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.lib.services.identity.v3.credentials_client import \
- CredentialsClient
-from tempest.lib.services.identity.v3.endpoints_client import EndPointsClient
-from tempest.lib.services.identity.v3.groups_client import GroupsClient
-from tempest.lib.services.identity.v3.identity_client import IdentityClient
-from tempest.lib.services.identity.v3.inherited_roles_client import \
- InheritedRolesClient
-from tempest.lib.services.identity.v3.policies_client import PoliciesClient
-from tempest.lib.services.identity.v3.projects_client import ProjectsClient
-from tempest.lib.services.identity.v3.regions_client import RegionsClient
-from tempest.lib.services.identity.v3.role_assignments_client import \
- RoleAssignmentsClient
-from tempest.lib.services.identity.v3.roles_client import RolesClient
-from tempest.lib.services.identity.v3.services_client import ServicesClient
-from tempest.lib.services.identity.v3.token_client import V3TokenClient
-from tempest.lib.services.identity.v3.trusts_client import TrustsClient
-from tempest.lib.services.identity.v3.users_client import UsersClient
-from tempest.services.identity.v3.json.domains_client import DomainsClient
-
-__all__ = ['CredentialsClient', 'EndPointsClient', 'GroupsClient',
- 'IdentityClient', 'InheritedRolesClient', 'PoliciesClient',
- 'ProjectsClient', 'RegionsClient', 'RoleAssignmentsClient',
- 'RolesClient', 'ServicesClient', 'V3TokenClient', 'TrustsClient',
- 'UsersClient', 'DomainsClient', ]
diff --git a/tempest/services/identity/v3/json/__init__.py b/tempest/services/identity/v3/json/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/identity/v3/json/__init__.py
+++ /dev/null
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 9445e34..6d656ec 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from six.moves import http_client as httplib
from six.moves.urllib import parse as urlparse
@@ -189,7 +188,7 @@
# Send the PUT request and the headers including the "Expect" header
conn.putrequest('PUT', path)
- for header, value in six.iteritems(headers):
+ for header, value in headers.items():
conn.putheader(header, value)
conn.endheaders()
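
Dropping six here relies on dict.items(), which iterates key/value pairs on both Python 2 and Python 3 (a list on 2, a view on 3); the same substitution appears in manager.py, test_network_v6.py and test_preprov_creds.py elsewhere in this patch. A tiny standalone equivalent:

    headers = {'Expect': '100-continue', 'X-Object-Meta-Color': 'blue'}

    # iterates identically under Python 2 and Python 3, no six wrapper needed
    for header, value in headers.items():
        print(header, value)
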
diff --git a/tempest/test.py b/tempest/test.py
index af97932..039afa1 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -64,7 +64,6 @@
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
- 'baremetal': CONF.service_available.ironic,
'volume': CONF.service_available.cinder,
'network': True,
'identity': True,
@@ -157,7 +156,7 @@
if status_code is None or status_code == exc_status_code:
LOG.error('Hints: This test was made for the bug %s. '
'The failure could be related to '
- 'https://launchpad.net/bugs/%s' % (bug, bug))
+ 'https://launchpad.net/bugs/%s', bug, bug)
raise exc
return wrapper
return decorator
@@ -220,7 +219,6 @@
"""
setUpClassCalled = False
- _service = None
# NOTE(andreaf) credentials holds a list of the credentials to be allocated
# at class setup time. Credential types can be 'primary', 'alt', 'admin' or
@@ -264,8 +262,8 @@
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
- LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
- etype, cls.__name__))
+ LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass.",
+ etype, cls.__name__)
cls.tearDownClass()
try:
six.reraise(etype, value, trace)
@@ -297,9 +295,9 @@
# resources that were successfully setup in resource_cleanup,
# log AttributeError as info instead of exception.
if tetype is AttributeError and name == 'resources':
- LOG.info("tearDownClass of %s failed: %s" % (name, te))
+ LOG.info("tearDownClass of %s failed: %s", name, te)
else:
- LOG.exception("teardown of %s failed: %s" % (name, te))
+ LOG.exception("teardown of %s failed: %s", name, te)
if not etype:
etype, value, trace = sys_exec_info
# If exceptions were raised during teardown, and not before, re-raise
@@ -534,8 +532,7 @@
else:
raise lib_exc.InvalidCredentials(
"Invalid credentials type %s" % credential_type)
- manager = cls.client_manager(credentials=creds.credentials,
- service=cls._service)
+ manager = cls.client_manager(credentials=creds.credentials)
# NOTE(andreaf) Ensure credentials have user and project id fields.
# It may not be the case when using pre-provisioned credentials.
manager.auth_provider.set_auth()
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index f8d5d9d..e9f59af 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -143,7 +143,7 @@
plug.obj.register_opts(conf)
except Exception:
LOG.exception('Plugin %s raised an exception trying to run '
- 'register_opts' % plug.name)
+ 'register_opts', plug.name)
def get_plugin_options_list(self):
plugin_options = []
@@ -163,4 +163,4 @@
plug.name, service_clients)
except Exception:
LOG.exception('Plugin %s raised an exception trying to run '
- 'get_service_clients' % plug.name)
+ 'get_service_clients', plug.name)
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index b08954f..6773b2f 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -75,7 +75,7 @@
fake_domain_list = {'domains': [{'id': 'fake_domain',
'name': 'Fake_Domain'}]}
self.useFixture(fixtures.MockPatch(''.join([
- 'tempest.services.identity.v3.json.domains_client.'
+ 'tempest.lib.services.identity.v3.domains_client.'
'DomainsClient.list_domains']),
return_value=fake_domain_list))
self.useFixture(fixtures.MockPatch(
@@ -121,7 +121,7 @@
super(TestAccountGeneratorV3, self).setUp()
fake_domain_list = {'domains': [{'id': 'fake_domain'}]}
self.useFixture(fixtures.MockPatch(''.join([
- 'tempest.services.identity.v3.json.domains_client.'
+ 'tempest.lib.services.identity.v3.domains_client.'
'DomainsClient.list_domains']),
return_value=fake_domain_list))
diff --git a/tempest/tests/common/test_dynamic_creds.py b/tempest/tests/common/test_dynamic_creds.py
index a90ca8a..b4fbd50 100644
--- a/tempest/tests/common/test_dynamic_creds.py
+++ b/tempest/tests/common/test_dynamic_creds.py
@@ -27,6 +27,7 @@
v2_tenants_client
from tempest.lib.services.identity.v2 import token_client as v2_token_client
from tempest.lib.services.identity.v2 import users_client as v2_users_client
+from tempest.lib.services.identity.v3 import domains_client
from tempest.lib.services.identity.v3 import identity_client as v3_iden_client
from tempest.lib.services.identity.v3 import projects_client as \
v3_projects_client
@@ -35,7 +36,6 @@
from tempest.lib.services.identity.v3 import users_client as \
v3_users_client
from tempest.lib.services.network import routers_client
-from tempest.services.identity.v3.json import domains_client
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_http
diff --git a/tempest/tests/common/test_preprov_creds.py b/tempest/tests/common/test_preprov_creds.py
index f824b6c..1c9982c 100644
--- a/tempest/tests/common/test_preprov_creds.py
+++ b/tempest/tests/common/test_preprov_creds.py
@@ -101,7 +101,7 @@
preprov_creds.PreProvisionedCredentialProvider.HASH_CRED_FIELDS)
for account in accounts_list:
hash = hashlib.md5()
- account_for_hash = dict((k, v) for (k, v) in six.iteritems(account)
+ account_for_hash = dict((k, v) for (k, v) in account.items()
if k in hash_fields)
hash.update(six.text_type(account_for_hash).encode('utf-8'))
temp_hash = hash.hexdigest()
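The six.iteritems() removal here (and the matching one in test_object_client.py further down) drops a Python 2-only helper; dict.items() behaves the same on both interpreters and feeds the existing hashing logic unchanged. A minimal sketch of that hashing step with a made-up account dict and an illustrative subset of hash fields, not the real HASH_CRED_FIELDS:

import hashlib

import six  # still used for text_type in the real test

hash_fields = ('username', 'password', 'project_name')  # illustrative subset
account = {'username': 'demo', 'password': 'secret',
           'project_name': 'demo', 'types': ['admin']}

account_for_hash = dict((k, v) for (k, v) in account.items()
                        if k in hash_fields)
digest = hashlib.md5(
    six.text_type(account_for_hash).encode('utf-8')).hexdigest()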
diff --git a/tempest/tests/lib/fake_identity.py b/tempest/tests/lib/fake_identity.py
index 831f8b5..8bae34f 100644
--- a/tempest/tests/lib/fake_identity.py
+++ b/tempest/tests/lib/fake_identity.py
@@ -55,6 +55,7 @@
},
"user": {
"id": "fake_alt_user_id",
+ "password_expires_at": None,
},
"serviceCatalog": CATALOG_V2,
},
@@ -71,6 +72,7 @@
},
"user": {
"id": "fake_user_id",
+ "password_expires_at": None,
},
"serviceCatalog": CATALOG_V2,
},
@@ -83,18 +85,21 @@
"id": "first_compute_fake_service",
"interface": "public",
"region": "NoMatchRegion",
+ "region_id": "NoMatchRegion",
"url": "http://fake_url/v3/first_endpoint/api"
},
{
"id": "second_fake_service",
"interface": "public",
"region": "FakeRegion",
+ "region_id": "FakeRegion",
"url": "http://fake_url/v3/second_endpoint/api"
},
{
"id": "third_fake_service",
"interface": "admin",
"region": "MiddleEarthRegion",
+ "region_id": "MiddleEarthRegion",
"url": "http://fake_url/v3/third_endpoint/api"
}
@@ -108,6 +113,7 @@
IDENTITY_V3_RESPONSE = {
"token": {
+ "audit_ids": ["ny5LA5YXToa_mAVO8Hnupw", "9NPTvsRDSkmsW61abP978Q"],
"methods": [
"token",
"password"
@@ -127,7 +133,8 @@
"name": "domain_name"
},
"id": "fake_user_id",
- "name": "username"
+ "name": "username",
+ "password_expires_at": None,
},
"issued_at": "2013-05-29T16:55:21.468960Z",
"catalog": CATALOG_V3
@@ -136,6 +143,7 @@
IDENTITY_V3_RESPONSE_DOMAIN_SCOPE = {
"token": {
+ "audit_ids": ["ny5LA5YXToa_mAVO8Hnupw", "9NPTvsRDSkmsW61abP978Q"],
"methods": [
"token",
"password"
@@ -151,7 +159,8 @@
"name": "domain_name"
},
"id": "fake_user_id",
- "name": "username"
+ "name": "username",
+ "password_expires_at": None,
},
"issued_at": "2013-05-29T16:55:21.468960Z",
"catalog": CATALOG_V3
@@ -160,6 +169,7 @@
IDENTITY_V3_RESPONSE_NO_SCOPE = {
"token": {
+ "audit_ids": ["ny5LA5YXToa_mAVO8Hnupw", "9NPTvsRDSkmsW61abP978Q"],
"methods": [
"token",
"password"
@@ -171,7 +181,8 @@
"name": "domain_name"
},
"id": "fake_user_id",
- "name": "username"
+ "name": "username",
+ "password_expires_at": None,
},
"issued_at": "2013-05-29T16:55:21.468960Z",
}
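The additions above only extend the canned Keystone responses: audit_ids, password_expires_at and region_id are fields that current Keystone token payloads can carry, so code that parses these fakes sees the same shape as a live deployment. A small sanity sketch, not part of the diff, of what the updated fake now contains:

from tempest.tests.lib import fake_identity

token = fake_identity.IDENTITY_V3_RESPONSE['token']
assert len(token['audit_ids']) == 2
assert token['user']['password_expires_at'] is None
# The fake catalog endpoints now report region_id alongside region.
for endpoint in token['catalog'][0]['endpoints']:
    assert endpoint['region_id'] == endpoint['region']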
diff --git a/tempest/tests/lib/services/identity/v3/test_domains_client.py b/tempest/tests/lib/services/identity/v3/test_domains_client.py
new file mode 100644
index 0000000..f89ced7
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_domains_client.py
@@ -0,0 +1,138 @@
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import domains_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestDomainsClient(base.BaseServiceTest):
+ FAKE_CREATE_DOMAIN = {
+ "domain": {
+ "description": "Domain description",
+ "enabled": True,
+ "name": "myDomain"
+ }
+ }
+
+ FAKE_DOMAIN_INFO = {
+ "domain": {
+ "description": "Used for swift functional testing",
+ "enabled": True,
+ "id": "5a75994a3",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/5a75994a3"
+ },
+ "name": "swift_test"
+ }
+ }
+
+ FAKE_LIST_DOMAINS = {
+ "domains": [
+ {
+ "description": "Used for swift functional testing",
+ "enabled": True,
+ "id": "5a75994a3",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/5a75994a3"
+ },
+ "name": "swift_test"
+ },
+ {
+ "description": "Owns users and tenants available on " +
+ "Identity API",
+ "enabled": True,
+ "id": "default",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/default"
+ },
+ "name": "Default"
+ }
+ ],
+ "links": {
+ "next": None,
+ "previous": None,
+ "self": "http://example.com/identity/v3/domains"
+ }
+ }
+
+ def setUp(self):
+ super(TestDomainsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = domains_client.DomainsClient(fake_auth,
+ 'identity',
+ 'regionOne')
+
+ def _test_create_domain(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_domain,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_DOMAIN,
+ bytes_body,
+ status=201)
+
+ def _test_show_domain(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_domain,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_DOMAIN_INFO,
+ bytes_body,
+ domain_id="5a75994a3")
+
+ def _test_list_domains(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_domains,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_DOMAINS,
+ bytes_body)
+
+ def _test_update_domain(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_domain,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_DOMAIN_INFO,
+ bytes_body,
+ domain_id="5a75994a3")
+
+ def test_create_domain_with_str_body(self):
+ self._test_create_domain()
+
+ def test_create_domain_with_bytes_body(self):
+ self._test_create_domain(bytes_body=True)
+
+ def test_show_domain_with_str_body(self):
+ self._test_show_domain()
+
+ def test_show_domain_with_bytes_body(self):
+ self._test_show_domain(bytes_body=True)
+
+ def test_list_domain_with_str_body(self):
+ self._test_list_domains()
+
+ def test_list_domain_with_bytes_body(self):
+ self._test_list_domains(bytes_body=True)
+
+ def test_update_domain_with_str_body(self):
+ self._test_update_domain()
+
+ def test_update_domain_with_bytes_body(self):
+ self._test_update_domain(bytes_body=True)
+
+ def test_delete_domain(self):
+ self.check_service_client_function(
+ self.client.delete_domain,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ domain_id="5a75994a3",
+ status=204)
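The new unit test drives the tempest.lib DomainsClient through check_service_client_function (from tempest.tests.lib.services.base), which mocks the named RestClient verb and compares the decoded body against the FAKE_* payloads above. A rough, self-contained sketch of what that amounts to for the list_domains case; it is an illustration of the pattern, not the helper's actual implementation:

import mock

from tempest.lib.services.identity.v3 import domains_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib import fake_http

client = domains_client.DomainsClient(fake_auth_provider.FakeAuthProvider(),
                                      'identity', 'regionOne')
fake_resp = fake_http.fake_http_response(None, status=200)
# Stub the GET at the RestClient layer and let the real client code parse it.
with mock.patch('tempest.lib.common.rest_client.RestClient.get',
                return_value=(fake_resp, '{"domains": []}')):
    assert client.list_domains()['domains'] == []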
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index 9f4b4cc..38e8c4a 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -20,7 +20,7 @@
from tempest.lib import exceptions
from tempest.lib.services.identity.v3 import token_client
from tempest.tests import base
-from tempest.tests.lib import fake_http
+from tempest.tests.lib import fake_identity
class TestTokenClientV3(base.TestCase):
@@ -31,10 +31,8 @@
def test_auth(self):
token_client_v3 = token_client.V3TokenClient('fake_url')
- response = fake_http.fake_http_response(
- None, status=201,
- )
- body = {'access': {'token': 'fake_token'}}
+ response, body_text = fake_identity._fake_v3_response(None, None)
+ body = json.loads(body_text)
with mock.patch.object(token_client_v3, 'post') as post_mock:
post_mock.return_value = response, body
@@ -60,10 +58,8 @@
def test_auth_with_project_id_and_domain_id(self):
token_client_v3 = token_client.V3TokenClient('fake_url')
- response = fake_http.fake_http_response(
- None, status=201,
- )
- body = {'access': {'token': 'fake_token'}}
+ response, body_text = fake_identity._fake_v3_response(None, None)
+ body = json.loads(body_text)
with mock.patch.object(token_client_v3, 'post') as post_mock:
post_mock.return_value = response, body
@@ -103,10 +99,8 @@
def test_auth_with_tenant(self):
token_client_v3 = token_client.V3TokenClient('fake_url')
- response = fake_http.fake_http_response(
- None, status=201,
- )
- body = {'access': {'token': 'fake_token'}}
+ response, body_text = fake_identity._fake_v3_response(None, None)
+ body = json.loads(body_text)
with mock.patch.object(token_client_v3, 'post') as post_mock:
post_mock.return_value = response, body
@@ -138,13 +132,10 @@
def test_request_with_str_body(self):
token_client_v3 = token_client.V3TokenClient('fake_url')
- response = fake_http.fake_http_response(
- None, status=200,
- )
- body = str('{"access": {"token": "fake_token"}}')
with mock.patch.object(token_client_v3, 'raw_request') as mock_raw_r:
- mock_raw_r.return_value = response, body
+ mock_raw_r.return_value = (
+ fake_identity._fake_v3_response(None, None))
resp, body = token_client_v3.request('GET', 'fake_uri')
self.assertIsInstance(body, dict)
@@ -152,10 +143,8 @@
def test_request_with_bytes_body(self):
token_client_v3 = token_client.V3TokenClient('fake_url')
- response = fake_http.fake_http_response(
- None, status=200,
- )
- body = b'{"access": {"token": "fake_token"}}'
+ response, body_text = fake_identity._fake_v3_response(None, None)
+ body = body_text.encode('utf-8')
with mock.patch.object(token_client_v3, 'raw_request') as mock_raw_r:
mock_raw_r.return_value = response, body
diff --git a/tempest/tests/lib/services/image/v2/test_namespace_object_client.py b/tempest/tests/lib/services/image/v2/test_namespace_object_client.py
new file mode 100644
index 0000000..8d29660
--- /dev/null
+++ b/tempest/tests/lib/services/image/v2/test_namespace_object_client.py
@@ -0,0 +1,210 @@
+# Copyright 2016 EasyStack. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.image.v2 import namespace_objects_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestNamespaceObjectClient(base.BaseServiceTest):
+ FAKE_CREATE_SHOW_OBJECTS = {
+ "created_at": "2016-09-19T18:20:56Z",
+ "description": "You can configure the CPU limits.",
+ "name": "CPU Limits",
+ "properties": {
+ "quota:cpu_period": {
+ "description": "Specifies the enforcement interval",
+ "maximum": 1000000,
+ "minimum": 1000,
+ "title": "Quota: CPU Period",
+ "type": "integer"
+ },
+ "quota:cpu_quota": {
+ "description": "Specifies the maximum allowed bandwidth ",
+ "title": "Quota: CPU Quota",
+ "type": "integer"
+ },
+ "quota:cpu_shares": {
+ "description": "Specifies the proportional weighted share.",
+ "title": "Quota: CPU Shares",
+ "type": "integer"
+ }
+ },
+ "required": [],
+ "schema": "/v2/schemas/metadefs/object",
+ "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU",
+ "updated_at": "2016-09-19T18:20:56Z"
+ }
+
+ FAKE_LIST_OBJECTS = {
+ "objects": [
+ {
+ "created_at": "2016-09-18T18:16:35Z",
+ "description": "You can configure the CPU limits.",
+ "name": "CPU Limits",
+ "properties": {
+ "quota:cpu_period": {
+ "description": "Specifies the enforcement interval ",
+ "maximum": 1000000,
+ "minimum": 1000,
+ "title": "Quota: CPU Period",
+ "type": "integer"
+ },
+ "quota:cpu_quota": {
+ "description": "Specifies the maximum.",
+ "title": "Quota: CPU Quota",
+ "type": "integer"
+ },
+ "quota:cpu_shares": {
+ "description": " Desc.",
+ "title": "Quota: CPU Shares",
+ "type": "integer"
+ }
+ },
+ "required": [],
+ "schema": "/v2/schemas/metadefs/object",
+ "self":
+ "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU"
+ },
+ {
+ "created_at": "2016-09-18T18:16:35Z",
+ "description": "Using disk I/O quotas.",
+ "name": "Disk QoS",
+ "properties": {
+ "quota:disk_read_bytes_sec": {
+ "description": "Sets disk I/O quota.",
+ "title": "Quota: Disk read bytes / sec",
+ "type": "integer"
+ },
+ "quota:disk_read_iops_sec": {
+ "description": "Sets disk I/O quota",
+ "title": "Quota: Disk read IOPS / sec",
+ "type": "integer"
+ },
+ "quota:disk_total_bytes_sec": {
+ "description": "Sets disk I/O quota.",
+ "title": "Quota: Disk Total Bytes / sec",
+ "type": "integer"
+ },
+ "quota:disk_total_iops_sec": {
+ "description": "Sets disk I/O quota.",
+ "title": "Quota: Disk Total IOPS / sec",
+ "type": "integer"
+ },
+ "quota:disk_write_bytes_sec": {
+ "description": "Sets disk I/O quota.",
+ "title": "Quota: Disk Write Bytes / sec",
+ "type": "integer"
+ },
+ "quota:disk_write_iops_sec": {
+ "description": "Sets disk I/O quota.",
+ "title": "Quota: Disk Write IOPS / sec",
+ "type": "integer"
+ }
+ },
+ "required": [],
+ "schema": "/v2/schemas/metadefs/object",
+ "self":
+ "/v2/metadefs/namespaces/OS::Compute::Quota/objects/Disk QoS"
+ },
+ ],
+ "schema": "v2/schemas/metadefs/objects"
+ }
+
+ FAKE_UPDATE_OBJECTS = {
+ "description": "You can configure the CPU limits.",
+ "name": "CPU",
+ "properties": {
+ "quota:cpu_shares": {
+ "description": "Specify.",
+ "title": "Quota: CPU Shares",
+ "type": "integer"
+ }
+ },
+ "required": []
+ }
+
+ def setUp(self):
+ super(TestNamespaceObjectClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = namespace_objects_client.NamespaceObjectsClient(
+ fake_auth, 'image', 'regionOne')
+
+ def _test_create_namespace_objects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_namespace_object,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_SHOW_OBJECTS,
+ bytes_body, status=201,
+ namespace="OS::Compute::Hypervisor",
+ object_name="OS::Glance::Image")
+
+ def _test_list_namespace_objects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_namespace_objects,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_OBJECTS,
+ bytes_body,
+ namespace="OS::Compute::Hypervisor")
+
+ def _test_show_namespace_objects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_namespace_object,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_CREATE_SHOW_OBJECTS,
+ bytes_body,
+ namespace="OS::Compute::Hypervisor",
+ object_name="OS::Glance::Image")
+
+ def _test_update_namespace_objects(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_namespace_object,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_OBJECTS,
+ bytes_body,
+ namespace="OS::Compute::Hypervisor",
+ object_name="OS::Glance::Image",
+ name="CPU")
+
+ def test_create_namespace_object_with_str_body(self):
+ self._test_create_namespace_objects()
+
+ def test_create_namespace_object_with_bytes_body(self):
+ self._test_create_namespace_objects(bytes_body=True)
+
+ def test_list_namespace_object_with_str_body(self):
+ self._test_list_namespace_objects()
+
+ def test_list_namespace_object_with_bytes_body(self):
+ self._test_list_namespace_objects(bytes_body=True)
+
+ def test_show_namespace_object_with_str_body(self):
+ self._test_show_namespace_objects()
+
+ def test_show_namespace_object_with_bytes_body(self):
+ self._test_show_namespace_objects(bytes_body=True)
+
+ def test_update_namespace_object_with_str_body(self):
+ self._test_update_namespace_objects()
+
+ def test_update_namespace_object_with_bytes_body(self):
+ self._test_update_namespace_objects(bytes_body=True)
+
+ def test_delete_namespace_objects(self):
+ self.check_service_client_function(
+ self.client.delete_namespace_object,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, namespace="OS::Compute::Hypervisor",
+ object_name="OS::Glance::Image",
+ status=204)
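As with the domains tests, these cases all go through check_service_client_function; the delete case is the only one that expects an empty body and a 204. A rough standalone sketch of that path follows; the empty-dict body handed to the mocked DELETE mirrors what I believe the helper feeds the client for a falsy expected body, which is an assumption rather than something this diff shows:

import mock

from tempest.lib.services.image.v2 import namespace_objects_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib import fake_http

client = namespace_objects_client.NamespaceObjectsClient(
    fake_auth_provider.FakeAuthProvider(), 'image', 'regionOne')
fake_resp = fake_http.fake_http_response(None, status=204)
# Stub the DELETE; the client should accept the 204 and return without error.
with mock.patch('tempest.lib.common.rest_client.RestClient.delete',
                return_value=(fake_resp, {})):
    client.delete_namespace_object(namespace="OS::Compute::Hypervisor",
                                   object_name="OS::Glance::Image")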
diff --git a/tempest/tests/negative/__init__.py b/tempest/tests/negative/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/tests/negative/__init__.py
+++ /dev/null
diff --git a/tempest/tests/services/object_storage/test_object_client.py b/tempest/tests/services/object_storage/test_object_client.py
index cc1dc1a..748614c 100644
--- a/tempest/tests/services/object_storage/test_object_client.py
+++ b/tempest/tests/services/object_storage/test_object_client.py
@@ -15,7 +15,6 @@
import mock
-import six
from tempest.lib import exceptions
from tempest.services.object_storage import object_client
@@ -85,7 +84,7 @@
# Verify that headers were written, including "Expect:100-continue"
calls = []
- for header, value in six.iteritems(expected_hdrs):
+ for header, value in expected_hdrs.items():
calls.append(mock.call(header, value))
mock_poc.return_value.putheader.assert_has_calls(calls, False)
diff --git a/tox.ini b/tox.ini
index 7a36e84..46823d8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -146,7 +146,7 @@
ignore = E125,E123,E129
show-source = True
exclude = .git,.venv,.tox,dist,doc,*egg
-enable-extensions = H106,H203
+enable-extensions = H106,H203,H904
[testenv:releasenotes]
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html