Merge "Fix docstrings to match with method arguments"
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 4a5bab5..062e920 100755
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -270,6 +270,9 @@
'SHUTOFF')
self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ # NOTE(jlk): Explicitly delete the server to get a new one for later
+ # tests. Avoids resize down race issues.
+ self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'VERIFY_RESIZE')
@@ -285,10 +288,6 @@
# NOTE(mriedem): tearDown requires the server to be started.
self.client.start_server(self.server_id)
- # NOTE(jlk): Explicitly delete the server to get a new one for later
- # tests. Avoids resize down race issues.
- self.addCleanup(self.delete_server, self.server_id)
-
@test.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@@ -309,6 +308,9 @@
# values after a resize is reverted
self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ # NOTE(zhufl): Explicitly delete the server to get a new one for later
+ # tests. Avoids resize down race issues.
+ self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'VERIFY_RESIZE')
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index baeaa0c..2686af2 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -26,6 +26,13 @@
credentials = ['primary', 'alt', 'admin']
@classmethod
+ def skip_checks(cls):
+ super(FloatingIPAdminTestJSON, cls).skip_checks()
+ if not test.is_extension_enabled('router', 'network'):
+ msg = "router extension not enabled."
+ raise cls.skipException(msg)
+
+ @classmethod
def setup_clients(cls):
super(FloatingIPAdminTestJSON, cls).setup_clients()
cls.alt_floating_ips_client = cls.alt_manager.floating_ips_client
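For context on the new skip_checks above: tempest calls skip_checks() from its base setUpClass before any clients or resources are created, so a missing extension skips the whole class cheaply. A rough standalone sketch of the mechanism (the boolean flag stands in for tempest's extension discovery and is an assumption, not the real check):

    import unittest


    class ExtensionGateSketch(unittest.TestCase):
        # Stand-in for CONF/extension discovery; not the real tempest lookup.
        router_extension_enabled = False

        @classmethod
        def setUpClass(cls):
            # Raising SkipTest here skips every test in the class before any
            # expensive per-class setup happens.
            if not cls.router_extension_enabled:
                raise unittest.SkipTest("router extension not enabled.")

        def test_would_need_router_extension(self):
            self.fail('unreachable when the class is skipped')


    if __name__ == '__main__':
        unittest.main()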
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
new file mode 100644
index 0000000..8a21853
--- /dev/null
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -0,0 +1,79 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import operator
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class BackendsCapabilitiesAdminV2TestsJSON(base.BaseVolumeAdminTest):
+
+ CAPABILITIES = ('namespace',
+ 'vendor_name',
+ 'volume_backend_name',
+ 'pool_name',
+ 'driver_version',
+ 'storage_protocol',
+ 'display_name',
+ 'description',
+ 'visibility',
+ 'properties')
+
+ @classmethod
+ def resource_setup(cls):
+ super(BackendsCapabilitiesAdminV2TestsJSON, cls).resource_setup()
+ # Get host list, formatted as host@backend-name
+ cls.hosts = [
+ pool['name'] for pool in
+ cls.admin_volume_client.show_pools()['pools']
+ ]
+
+ @test.idempotent_id('3750af44-5ea2-4cd4-bc3e-56e7e6caf854')
+ def test_get_capabilities_backend(self):
+ # Test backend properties
+ backend = self.admin_volume_client.show_backend_capabilities(
+ self.hosts[0])
+
+ # Verify getting capabilities parameters from a backend
+ for key in self.CAPABILITIES:
+ self.assertIn(key, backend)
+
+ @test.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
+ def test_compare_volume_stats_values(self):
+ # Compare the values reported by show_backend_capabilities
+ # with those reported by show_pools
+ VOLUME_STATS = ('vendor_name',
+ 'volume_backend_name',
+ 'storage_protocol')
+
+ # Get the list of backend capabilities via show_pools
+ cinder_pools = [
+ pool['capabilities'] for pool in
+ self.admin_volume_client.show_pools(detail=True)['pools']
+ ]
+
+ # Get each backend's capabilities via show_backend_capabilities
+ capabilities = [
+ self.admin_volume_client.show_backend_capabilities(
+ host=host) for host in self.hosts
+ ]
+
+ # Build tuples of VOLUME_STATS values; list() keeps the comparison valid on py3
+ expected_list = list(map(operator.itemgetter(*VOLUME_STATS),
+ cinder_pools))
+ observed_list = list(map(operator.itemgetter(*VOLUME_STATS),
+ capabilities))
+ self.assertEqual(expected_list, observed_list)
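For readers unfamiliar with operator.itemgetter: given several keys it returns a callable that extracts those keys from a mapping as a tuple, which is what reduces the comparison above to plain list equality. A small standalone illustration (the pool values are made up):

    import operator

    get_stats = operator.itemgetter('vendor_name', 'volume_backend_name',
                                    'storage_protocol')
    pool = {'vendor_name': 'Open Source', 'volume_backend_name': 'lvm',
            'storage_protocol': 'iSCSI', 'total_capacity_gb': 28}
    # itemgetter with multiple keys returns the values as a tuple, in order.
    assert get_stats(pool) == ('Open Source', 'lvm', 'iSCSI')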
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index c501ffc..60a35b0 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -42,11 +42,13 @@
super(VolumesV2ListTestJSON, cls).resource_setup()
# Create 3 test volumes
- cls.volume_id_list = []
cls.metadata = {'Type': 'work'}
+ # NOTE(zhufl): When using pre-provisioned credentials, the project
+ # may have volumes other than those created below.
+ existing_volumes = cls.client.list_volumes()['volumes']
+ cls.volume_id_list = [vol['id'] for vol in existing_volumes]
for i in range(3):
volume = cls.create_volume(metadata=cls.metadata)
- volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_id_list.append(volume['id'])
@test.idempotent_id('2a7064eb-b9c3-429b-b888-33928fc5edd3')
diff --git a/tempest/common/utils/net_utils.py b/tempest/common/utils/net_utils.py
index fd0391d..f0d3da3 100644
--- a/tempest/common/utils/net_utils.py
+++ b/tempest/common/utils/net_utils.py
@@ -37,6 +37,11 @@
for fixed_ip in port.get('fixed_ips'):
alloc_set.add(fixed_ip['ip_address'])
+ # Exclude the subnet's gateway_ip from the allocation set
+ gateway_ip = subnet['subnet']['gateway_ip']
+ if gateway_ip:
+ alloc_set.add(gateway_ip)
+
av_set = subnet_set - alloc_set
addrs = []
for cidr in reversed(av_set.iter_cidrs()):
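The helper above builds the pool of unused addresses with netaddr IPSet arithmetic, so adding the gateway to alloc_set is enough to keep it out of the result. A minimal netaddr sketch of that set difference (addresses are illustrative):

    import netaddr

    subnet_set = netaddr.IPSet(['10.0.0.0/29'])
    alloc_set = netaddr.IPSet(['10.0.0.2'])   # address already used by a port
    alloc_set.add('10.0.0.1')                 # the subnet's gateway_ip
    available = subnet_set - alloc_set
    # The gateway is no longer offered as an unused address.
    assert netaddr.IPAddress('10.0.0.1') not in available
    assert netaddr.IPAddress('10.0.0.3') in available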
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 62ab67d..fdccfc3 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -87,45 +87,10 @@
cls.volumes_client = cls.manager.volumes_v2_client
cls.snapshots_client = cls.manager.snapshots_v2_client
- # ## Methods to handle sync and async deletes
-
- def setUp(self):
- super(ScenarioTest, self).setUp()
- self.cleanup_waits = []
- # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
- # because scenario tests in the same test class should not share
- # resources. If resources were shared between test cases then it
- # should be a single scenario test instead of multiples.
-
- # NOTE(yfried): this list is cleaned at the end of test_methods and
- # not at the end of the class
- self.addCleanup(self._wait_for_cleanups)
-
- def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
- cleanup_callable, cleanup_args=None,
- cleanup_kwargs=None, waiter_client=None):
- """Adds wait for async resource deletion at the end of cleanups
-
- @param waiter_callable: callable to wait for the resource to delete
- with the following waiter_client if specified.
- @param thing_id: the id of the resource to be cleaned-up
- @param thing_id_param: the name of the id param in the waiter
- @param cleanup_callable: method to load pass to self.addCleanup with
- the following *cleanup_args, **cleanup_kwargs.
- usually a delete method.
- """
- if cleanup_args is None:
- cleanup_args = []
- if cleanup_kwargs is None:
- cleanup_kwargs = {}
- self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
- wait_dict = {
- 'waiter_callable': waiter_callable,
- thing_id_param: thing_id
- }
- if waiter_client:
- wait_dict['client'] = waiter_client
- self.cleanup_waits.append(wait_dict)
+ # ## Test functions library
+ #
+ # The create_[resource] functions only return body and discard the
+ # resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
@@ -142,23 +107,6 @@
client.delete_port, port['id'])
return port
- def _wait_for_cleanups(self):
- # To handle async delete actions, a list of waits is added
- # which will be iterated over as the last step of clearing the
- # cleanup queue. That way all the delete calls are made up front
- # and the tests won't succeed unless the deletes are eventually
- # successful. This is the same basic approach used in the api tests to
- # limit cleanup execution time except here it is multi-resource,
- # because of the nature of the scenario tests.
- for wait in self.cleanup_waits:
- waiter_callable = wait.pop('waiter_callable')
- waiter_callable(**wait)
-
- # ## Test functions library
- #
- # The create_[resource] functions only return body and discard the
- # resp part which is not used in scenario tests
-
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
@@ -170,7 +118,7 @@
def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until=None,
- wait_on_delete=True, clients=None, **kwargs):
+ clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
@@ -256,18 +204,10 @@
name=name, flavor=flavor,
image_id=image_id, **kwargs)
- # TODO(jlanoux) Move wait_on_delete in compute.py
- if wait_on_delete:
- self.addCleanup(waiters.wait_for_server_termination,
- clients.servers_client,
- body['id'])
-
- self.addCleanup_with_wait(
- waiter_callable=waiters.wait_for_server_termination,
- thing_id=body['id'], thing_id_param='server_id',
- cleanup_callable=test_utils.call_and_ignore_notfound_exc,
- cleanup_args=[clients.servers_client.delete_server, body['id']],
- waiter_client=clients.servers_client)
+ self.addCleanup(waiters.wait_for_server_termination,
+ clients.servers_client, body['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ clients.servers_client.delete_server, body['id'])
server = clients.servers_client.show_server(body['id'])['server']
return server
@@ -481,11 +421,12 @@
image = _images_client.create_image(server['id'], name=name)
image_id = image.response['location'].split('images/')[1]
waiters.wait_for_image_status(_image_client, image_id, 'active')
- self.addCleanup_with_wait(
- waiter_callable=_image_client.wait_for_resource_deletion,
- thing_id=image_id, thing_id_param='id',
- cleanup_callable=test_utils.call_and_ignore_notfound_exc,
- cleanup_args=[_image_client.delete_image, image_id])
+
+ self.addCleanup(_image_client.wait_for_resource_deletion,
+ image_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ _image_client.delete_image, image_id)
+
if CONF.image_feature_enabled.api_v1:
# In glance v1 the additional properties are stored in the headers.
resp = _image_client.check_image(image_id)
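The simplified cleanups above rely on addCleanup's LIFO ordering: the termination waiter is registered first and the not-found-tolerant delete second, so at cleanup time the delete request goes out before the wait starts. A toy sketch of that ordering (the call names only mimic the ones in create_server()):

    import unittest


    class DeleteThenWaitSketch(unittest.TestCase):
        calls = []

        def test_lifo_order_yields_delete_then_wait(self):
            # Registered in the same order as in create_server() above.
            self.addCleanup(self.calls.append, 'wait_for_server_termination')
            self.addCleanup(self.calls.append, 'delete_server')

        @classmethod
        def tearDownClass(cls):
            # The most recently registered cleanup runs first.
            assert cls.calls == ['delete_server', 'wait_for_server_termination']


    if __name__ == '__main__':
        unittest.main()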
diff --git a/tempest/services/volume/base/admin/base_types_client.py b/tempest/services/volume/base/admin/base_types_client.py
index 2effaae..83870ae 100755
--- a/tempest/services/volume/base/admin/base_types_client.py
+++ b/tempest/services/volume/base/admin/base_types_client.py
@@ -202,42 +202,3 @@
"/types/%s/encryption/provider" % volume_type_id)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
-
- def add_type_access(self, volume_type_id, **kwargs):
- """Adds volume type access for the given project.
-
- Available params: see http://developer.openstack.org/
- api-ref-blockstorage-v2.html
- #createVolumeTypeAccessExt
- """
- post_body = json.dumps({'addProjectAccess': kwargs})
- url = 'types/%s/action' % volume_type_id
- resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def remove_type_access(self, volume_type_id, **kwargs):
- """Removes volume type access for the given project.
-
- Available params: see http://developer.openstack.org/
- api-ref-blockstorage-v2.html
- #removeVolumeTypeAccessExt
- """
- post_body = json.dumps({'removeProjectAccess': kwargs})
- url = 'types/%s/action' % volume_type_id
- resp, body = self.post(url, post_body)
- self.expected_success(202, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def list_type_access(self, volume_type_id):
- """Print access information about the given volume type.
-
- Available params: see http://developer.openstack.org/
- api-ref-blockstorage-v2.html#
- listVolumeTypeAccessExt
- """
- url = 'types/%s/os-volume-type-access' % volume_type_id
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/services/volume/base/base_volumes_client.py
index d694c53..c2e2b92 100755
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/services/volume/base/base_volumes_client.py
@@ -63,7 +63,11 @@
return rest_client.ResponseBody(resp, body)
def show_pools(self, detail=False):
- # List all the volumes pools (hosts)
+ """List all the volumes pools (hosts).
+
+ Output params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#listPools
+ """
url = 'scheduler-stats/get_pools'
if detail:
url += '?detail=True'
@@ -73,6 +77,19 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def show_backend_capabilities(self, host):
+ """Shows capabilities for a storage back end.
+
+ Output params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html
+ #showBackendCapabilities
+ """
+ url = 'capabilities/%s' % host
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def show_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
diff --git a/tempest/services/volume/v2/json/admin/types_client.py b/tempest/services/volume/v2/json/admin/types_client.py
index ecf5131..f76e8dc 100644
--- a/tempest/services/volume/v2/json/admin/types_client.py
+++ b/tempest/services/volume/v2/json/admin/types_client.py
@@ -13,9 +13,51 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
from tempest.services.volume.base.admin import base_types_client
class TypesClient(base_types_client.BaseTypesClient):
"""Client class to send CRUD Volume V2 API requests"""
api_version = "v2"
+
+ def add_type_access(self, volume_type_id, **kwargs):
+ """Adds volume type access for the given project.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html
+ #createVolumeTypeAccessExt
+ """
+ post_body = json.dumps({'addProjectAccess': kwargs})
+ url = 'types/%s/action' % volume_type_id
+ resp, body = self.post(url, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def remove_type_access(self, volume_type_id, **kwargs):
+ """Removes volume type access for the given project.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html
+ #removeVolumeTypeAccessExt
+ """
+ post_body = json.dumps({'removeProjectAccess': kwargs})
+ url = 'types/%s/action' % volume_type_id
+ resp, body = self.post(url, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_type_access(self, volume_type_id):
+ """Print access information about the given volume type.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#
+ listVolumeTypeAccessExt
+ """
+ url = 'types/%s/os-volume-type-access' % volume_type_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
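As a quick check of what the relocated access methods send, the action payload is just the kwargs wrapped under the action name. A runnable sketch of the request body built by add_type_access (the 'project' key and PROJECT_ID value are assumptions based on the linked API reference, not part of the patch):

    from oslo_serialization import jsonutils as json

    kwargs = {'project': 'PROJECT_ID'}
    post_body = json.dumps({'addProjectAccess': kwargs})
    # remove_type_access builds the same shape under 'removeProjectAccess'.
    assert json.loads(post_body) == {'addProjectAccess': {'project': 'PROJECT_ID'}}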