Merge "Remove slow tag from test_minbw test"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index c7004dd..c1981f9 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
.. code-block:: python
- class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+ class BaseTestCase1(api_version_utils.BaseMicroversionTest):
- [..]
- @classmethod
- def skip_checks(cls):
- super(BaseTestCase1, cls).skip_checks()
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- CONF.compute.min_microversion,
- CONF.compute.max_microversion)
+ [..]
+ @classmethod
+ def skip_checks(cls):
+ super(BaseTestCase1, cls).skip_checks()
+ api_version_utils.check_skip_with_microversion(cls.min_microversion,
+ cls.max_microversion,
+ CONF.compute.min_microversion,
+ CONF.compute.max_microversion)
Skip logic can be added in the tests base class or in any specific test class,
depending on the test class structure.
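For reference, a minimal sketch of a test class that opts into this skip logic by pinning a microversion range (the class and test names are illustrative; the attributes are the ones checked by check_skip_with_microversion above, and BaseV2ComputeTest already wires that check into its skip_checks()):

    # Hypothetical compute test limited to microversion 2.70 and later.
    # min_microversion / max_microversion are the class attributes that
    # api_version_utils.check_skip_with_microversion() compares against
    # CONF.compute.min_microversion / CONF.compute.max_microversion.
    from tempest.api.compute import base


    class ServersWithTagsTest(base.BaseV2ComputeTest):
        min_microversion = '2.70'
        max_microversion = 'latest'

        def test_list_servers(self):
            # Runs only when the configured microversion range includes 2.70+.
            self.servers_client.list_servers()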
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index ab1b0b1..6726def 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -268,12 +268,12 @@
class MyAPIClient(rest_client.RestClient):
- def __init__(self, auth_provider, service, region,
- my_arg, my_arg2=True, **kwargs):
- super(MyAPIClient, self).__init__(
- auth_provider, service, region, **kwargs)
- self.my_arg = my_arg
- self.my_args2 = my_arg
+ def __init__(self, auth_provider, service, region,
+ my_arg, my_arg2=True, **kwargs):
+ super(MyAPIClient, self).__init__(
+ auth_provider, service, region, **kwargs)
+ self.my_arg = my_arg
+ self.my_arg2 = my_arg2
Finally the service client should be structured in a python module, so that all
service client classes are importable from it. Each major API version should
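For context, a rough sketch of the importable-module layout this paragraph describes (the package and class names are illustrative, not taken from the patch):

    # my_plugin/services/__init__.py  (hypothetical plugin layout)
    # Re-export every service client class at the package level so that
    # callers can do: from my_plugin import services; services.MyAPIClient(...)
    from my_plugin.services.v1.my_api_client import MyAPIClient
    from my_plugin.services.v2.my_api_client import MyAPIClientV2

    __all__ = ['MyAPIClient', 'MyAPIClientV2']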
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def skip_checks(cls):
- """This section is used to evaluate config early and skip all test
- methods based on these checks
- """
- super(TestExampleCase, cls).skip_checks()
- if not CONF.section.foo
- cls.skip('A helpful message')
+ @classmethod
+ def skip_checks(cls):
+ """This section is used to evaluate config early and skip all test
+ methods based on these checks
+ """
+ super(TestExampleCase, cls).skip_checks()
+ if not CONF.section.foo:
+ raise cls.skipException('A helpful message')
- @classmethod
- def setup_credentials(cls):
- """This section is used to do any manual credential allocation and also
- in the case of dynamic credentials to override the default network
- resource creation/auto allocation
- """
- # This call is used to tell the credential allocator to not create any
- # network resources for this test case. It also enables selective
- # creation of other neutron resources. NOTE: it must go before the
- # super call
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ """This section is used to do any manual credential allocation and also
+ in the case of dynamic credentials to override the default network
+ resource creation/auto allocation
+ """
+ # This call is used to tell the credential allocator to not create any
+ # network resources for this test case. It also enables selective
+ # creation of other neutron resources. NOTE: it must go before the
+ # super call
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
- @classmethod
- def setup_clients(cls):
- """This section is used to setup client aliases from the manager object
- or to initialize any additional clients. Except in a few very
- specific situations you should not need to use this.
- """
- super(TestExampleCase, cls).setup_clients()
- cls.servers_client = cls.os_primary.servers_client
+ @classmethod
+ def setup_clients(cls):
+ """This section is used to setup client aliases from the manager object
+ or to initialize any additional clients. Except in a few very
+ specific situations you should not need to use this.
+ """
+ super(TestExampleCase, cls).setup_clients()
+ cls.servers_client = cls.os_primary.servers_client
- @classmethod
- def resource_setup(cls):
- """This section is used to create any resources or objects which are
- going to be used and shared by **all** test methods in the
- TestCase. Note then anything created in this section must also be
- destroyed in the corresponding resource_cleanup() method (which will
- be run during tearDownClass())
- """
- super(TestExampleCase, cls).resource_setup()
- cls.shared_server = cls.servers_client.create_server(...)
- cls.addClassResourceCleanup(waiters.wait_for_server_termination,
- cls.servers_client,
- cls.shared_server['id'])
- cls.addClassResourceCleanup(
- test_utils.call_and_ignore_notfound_exc(
- cls.servers_client.delete_server,
- cls.shared_server['id']))
+ @classmethod
+ def resource_setup(cls):
+ """This section is used to create any resources or objects which are
+ going to be used and shared by **all** test methods in the
+ TestCase. Note that anything created in this section must also be
+ destroyed in the corresponding resource_cleanup() method (which will
+ be run during tearDownClass())
+ """
+ super(TestExampleCase, cls).resource_setup()
+ cls.shared_server = cls.servers_client.create_server(...)
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client,
+ cls.shared_server['id'])
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.servers_client.delete_server,
+ cls.shared_server['id'])
.. _credentials:
@@ -150,9 +150,9 @@
credentials = ['primary', 'admin']
- @classmethod
- def skip_checks(cls):
- ...
+ @classmethod
+ def skip_checks(cls):
+ ...
In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True, router=False)
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(network=True, subnet=True, router=False)
+ super(TestExampleCase, cls).setup_credentials()
There are 2 quirks with the usage here. First, for the set_network_resources
function to work properly it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
This will not allocate any networking resources, because all of the arguments
default to False.
@@ -282,8 +282,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- self.os_primary.servers_client.create_server(...)
+ def test_example_create_server(self):
+ self.os_primary.servers_client.create_server(...)
is all you need to do. As described previously, in the above example the
``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
The credentials object provides access to all of the credential information you
would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
- username = credentials.username
- user_id = credentials.user_id
- password = credentials.password
- tenant_id = credentials.tenant_id
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
+ username = credentials.username
+ user_id = credentials.user_id
+ password = credentials.password
+ tenant_id = credentials.tenant_id
diff --git a/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
new file mode 100644
index 0000000..8e42e85
--- /dev/null
+++ b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added associate_floating_ip() and disassociate_floating_ip() methods
+ to the scenario manager.
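A short sketch of how a scenario test could exercise the two new helpers (see the tempest/scenario/manager.py hunk further down); the server and floating IP setup calls are the existing scenario utilities:

    # Hypothetical usage inside a NetworkScenarioTest subclass.
    server = self.create_server(wait_until='ACTIVE')
    # create_floating_ip() associates the IP with the server's port by default.
    floating_ip = self.create_floating_ip(server)

    # Detach the floating IP from the server, then attach it again.
    floating_ip = self.disassociate_floating_ip(floating_ip)
    self.assertIsNone(floating_ip['port_id'])

    floating_ip = self.associate_floating_ip(floating_ip, server)
    self.assertIsNotNone(floating_ip['port_id'])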
diff --git a/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
new file mode 100644
index 0000000..fd7a874
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+ This is an intermediate release during the Wallaby development cycle to
+ mark the end of support for the Extended Maintenance (EM) Stein release in Tempest.
+ After this release, Tempest will support the following OpenStack releases:
+
+ * Victoria
+ * Ussuri
+ * Train
+
+ Current development of Tempest is for the OpenStack Wallaby development
+ cycle.
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 9340997..342380e 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -112,7 +112,5 @@
server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
self.volumes_client, attachment['volumeId'], 'available')
- volume_after_detach = self.servers_client.list_volume_attachments(
- server['id'])['volumeAttachments']
- self.assertEqual(0, len(volume_after_detach),
- "Failed to detach volume")
+ waiters.wait_for_volume_attachment_remove_from_server(
+ self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0601bbe..102792e 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -427,3 +427,33 @@
CONF.compute.build_interval, original_ip_count):
raise lib_exc.TimeoutException(
'Timed out while waiting for IP count to decrease.')
+
+
+class AttachInterfacesV270Test(AttachInterfacesTestBase):
+ """Test interface API with microversion greater than 2.69"""
+ min_microversion = '2.70'
+
+ @decorators.idempotent_id('2853f095-8277-4067-92bd-9f10bd4f8e0c')
+ @utils.services('network')
+ def test_create_get_list_interfaces(self):
+ """Test interface API with microversion greater than 2.69
+
+ Checking create, get, list interface APIs response schema.
+ """
+ server = self.create_test_server(wait_until='ACTIVE')
+ try:
+ iface = self.interfaces_client.create_interface(server['id'])[
+ 'interfaceAttachment']
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server['id'], iface['port_id'],
+ 'ACTIVE')
+ except lib_exc.BadRequest as e:
+ msg = ('Multiple possible networks found, use a Network ID to be '
+ 'more specific.')
+ if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
+ raise
+ else:
+ # just to check the response schema
+ self.interfaces_client.show_interface(
+ server['id'], iface['port_id'])
+ self.interfaces_client.list_interfaces(server['id'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index d85e4f7..7251e36 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -200,6 +200,10 @@
super(AttachVolumeShelveTestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.shelve:
raise cls.skipException('Shelve is not available.')
+ if CONF.compute.compute_volume_common_az:
+ # assuming cross_az_attach is set to false in nova.conf
+ # per the compute_volume_common_az option description
+ raise cls.skipException('Cross AZ attach not available.')
def _count_volumes(self, server, validation_resources):
# Count number of volumes on an instance
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 28299a4..9e25901 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -402,7 +402,8 @@
# Validate that the list was fetched sorted accordingly
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
- sorted_list = [image['size'] for image in images_list]
+ sorted_list = [image['size'] for image in images_list
+ if image['size'] is not None]
msg = 'The list of images was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 625e08e..e3c33c7 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -317,6 +317,32 @@
'seconds', attachment_id, volume_id, time.time() - start)
+def wait_for_volume_attachment_remove_from_server(
+ client, server_id, volume_id):
+ """Waits for a volume to be removed from a given server.
+
+ This waiter checks the compute API if the volume attachment is removed.
+ """
+ start = int(time.time())
+ volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+ while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+ time.sleep(client.build_interval)
+
+ timed_out = int(time.time()) - start >= client.build_timeout
+ if timed_out:
+ message = ('Volume %s failed to detach from server %s within '
+ 'the required time (%s s) from the compute API '
+ 'perspective' %
+ (volume_id, server_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+
+ volumes = client.list_volume_attachments(server_id)[
+ 'volumeAttachments']
+
+ return volumes
+
+
def wait_for_volume_migration(client, volume_id, new_host):
"""Waits for a Volume to move to a new host."""
body = client.show_volume(volume_id)['volume']
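For reference, a sketch of the intended call site for the new waiter (mirroring the tempest/api/compute/admin/test_volume.py change above; the server and attachment variables come from the surrounding test):

    # Hypothetical call site inside a compute test: after requesting the
    # detach, block until the attachment disappears from the compute API.
    self.servers_client.detach_volume(server['id'], attachment['volumeId'])
    waiters.wait_for_volume_attachment_remove_from_server(
        self.servers_client, server['id'], attachment['volumeId'])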
diff --git a/tempest/config.py b/tempest/config.py
index 0d49b51..382b80f 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -761,11 +761,13 @@
deprecated_reason="This config option is no longer "
"used anywhere, so it can be removed."),
cfg.StrOpt('port_vnic_type',
- choices=[None, 'normal', 'direct', 'macvtap'],
+ choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
+ 'baremetal', 'virtio-forwarder'],
help="vnic_type to use when launching instances"
" with pre-configured ports."
" Supported ports are:"
- " ['normal','direct','macvtap']"),
+ " ['normal', 'direct', 'macvtap', 'direct-physical', "
+ "'baremetal', 'virtio-forwarder']"),
cfg.Opt('port_profile',
type=ProfileType,
default={},
diff --git a/tempest/lib/api_schema/response/compute/v2_70/interfaces.py b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
new file mode 100644
index 0000000..3160b92
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import interfaces
+
+# ****** Schemas changed in microversion 2.70 *****************
+#
+# 1. add optional field 'tag' in the Response body of the following APIs:
+# - GET /servers/{server_id}/os-interface
+# - POST /servers/{server_id}/os-interface
+# - GET /servers/{server_id}/os-interface/{port_id}
+
+get_create_interfaces = copy.deepcopy(interfaces.get_create_interfaces)
+get_create_interfaces['response_body']['properties']['interfaceAttachment'][
+ 'properties'].update({'tag': {'type': ['string', 'null']}})
+
+list_interfaces = copy.deepcopy(interfaces.list_interfaces)
+list_interfaces['response_body']['properties']['interfaceAttachments'][
+ 'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.1 ***
+delete_interface = copy.deepcopy(interfaces.delete_interface)
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index e1c02fa..9244a4a 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -16,15 +16,22 @@
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema
+from tempest.lib.api_schema.response.compute.v2_70 import interfaces as \
+ schemav270
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class InterfacesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.69', 'schema': schema},
+ {'min': '2.70', 'max': None, 'schema': schemav270}]
+
def list_interfaces(self, server_id):
resp, body = self.get('servers/%s/os-interface' % server_id)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -40,6 +47,7 @@
resp, body = self.post('servers/%s/os-interface' % server_id,
body=post_body)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -47,6 +55,7 @@
resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
port_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
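To make the version-to-schema mapping concrete, a purely illustrative helper showing what schema_versions_info expresses; the real lookup is done by BaseComputeClient.get_schema() using tempest's APIVersionRequest comparisons, and this simplified sketch assumes plain 'X.Y' version strings:

    def pick_schema(schema_versions_info, microversion):
        # Simplified, illustrative matcher; not the real get_schema() logic.
        def as_tuple(ver):
            return tuple(int(part) for part in ver.split('.'))

        requested = as_tuple(microversion)
        for entry in schema_versions_info:
            above_min = entry['min'] is None or as_tuple(entry['min']) <= requested
            below_max = entry['max'] is None or requested <= as_tuple(entry['max'])
            if above_min and below_max:
                return entry['schema']

    # pick_schema(InterfacesClient.schema_versions_info, '2.70') -> schemav270
    # pick_schema(InterfacesClient.schema_versions_info, '2.5')  -> v2.1 schema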
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index e9592e6..f369d5d 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -143,10 +143,10 @@
# resp part which is not used in scenario tests
def create_port(self, network_id, client=None, **kwargs):
- """Creates port"""
+ """Creates port for the respective network_id"""
if not client:
client = self.ports_client
- name = data_utils.rand_name(self.__class__.__name__)
+ name = kwargs.pop('namestart', self.__class__.__name__)
if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
if CONF.network.port_profile and 'binding:profile' not in kwargs:
@@ -155,6 +155,7 @@
name=name,
network_id=network_id,
**kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
@@ -200,6 +201,14 @@
direct: an SR-IOV port that is directly attached to a VM
macvtap: an SR-IOV port that is attached to a VM via a macvtap
device.
+ direct-physical: an SR-IOV port that is directly attached to a
+ VM using physical instead of virtual
+ functions.
+ baremetal: a baremetal port directly attached to a baremetal
+ node.
+ virtio-forwarder: an SR-IOV port that is indirectly attached
+ to a VM using a low-latency vhost-user
+ forwarding process.
Defaults to ``CONF.network.port_vnic_type``.
* *port_profile* (``dict``) --
This attribute is a dictionary that can be used (with admin
@@ -495,7 +504,14 @@
self.addCleanup(self._cleanup_volume_type, volume_type)
return volume_type
- def _create_loginable_secgroup_rule(self, secgroup_id=None):
+ def _create_loginable_secgroup_rule(self, secgroup_id=None, rulesets=None):
+ """Create loginable security group rule by compute clients.
+
+ This function will create by default the following rules:
+ 1. tcp port 22 allow rule in order to allow ssh access for ipv4
+ 2. ipv4 icmp allow rule in order to allow icmpv4
+ """
+
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
@@ -508,22 +524,23 @@
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- }
- ]
+ if not rulesets:
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ }
+ ]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
@@ -531,14 +548,16 @@
rules.append(sg_rule)
return rules
- def _create_security_group(self):
+ def _create_security_group(self, **kwargs):
"""Create security group and add rules to security group"""
- sg_name = data_utils.rand_name(self.__class__.__name__)
- sg_desc = sg_name + " description"
+ if not kwargs.get('name'):
+ kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
+ if not kwargs.get('description'):
+ kwargs['description'] = kwargs['name'] + " description"
secgroup = self.compute_security_groups_client.create_security_group(
- name=sg_name, description=sg_desc)['security_group']
- self.assertEqual(secgroup['name'], sg_name)
- self.assertEqual(secgroup['description'], sg_desc)
+ **kwargs)['security_group']
+ self.assertEqual(secgroup['name'], kwargs['name'])
+ self.assertEqual(secgroup['description'], kwargs['description'])
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
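A brief sketch of how a scenario test might use the reworked helpers above (the group name and the ICMP-only ruleset are illustrative values; when rulesets is omitted, the default ssh and ping rules shown above are created):

    # Hypothetical usage inside a ScenarioTest subclass.
    secgroup = self._create_security_group(name='tempest-icmp-only')
    # Add a custom ICMP-only ruleset instead of relying on the default
    # ssh+ping rules that apply when no rulesets argument is passed.
    self._create_loginable_secgroup_rule(
        secgroup_id=secgroup['id'],
        rulesets=[{
            'ip_protocol': 'icmp',
            'from_port': -1,
            'to_port': -1,
            'cidr': '0.0.0.0/0',
        }])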
@@ -577,7 +596,7 @@
linux_client.validate_authentication()
return linux_client
- def image_create(self, name='scenario-img'):
+ def image_create(self, name='scenario-img', **kwargs):
img_path = CONF.scenario.img_file
if not os.path.exists(img_path):
# TODO(kopecmartin): replace LOG.warning for raising
@@ -617,6 +636,7 @@
# Additional properties are flattened out in the v2 API.
if img_properties:
params.update(img_properties)
+ params.update(kwargs)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
@@ -653,7 +673,7 @@
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
- def create_server_snapshot(self, server, name=None):
+ def create_server_snapshot(self, server, name=None, **kwargs):
"""Creates server snapshot"""
# Glance client
_image_client = self.image_client
@@ -662,7 +682,7 @@
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
- image = _images_client.create_image(server['id'], name=name)
+ image = _images_client.create_image(server['id'], name=name, **kwargs)
image_id = image.response['location'].split('images/')[1]
waiters.wait_for_image_status(_image_client, image_id, 'active')
@@ -859,15 +879,25 @@
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None):
"""Returns timestamp
This wrapper utility does ssh and returns the timestamp.
+
+ :param ip_address: The floating IP or fixed IP of the remote server
+ :param dev_name: Name of the device that stores the timestamp
+ :param mount_path: Path which should be used as mount point for
+ dev_name
+ :param private_key: The SSH private key to use for authentication
+ :param server: Server dict, used for debugging purposes
+ :param username: Name of the Linux account on the remote server
"""
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -954,12 +984,21 @@
return self.create_server(**create_kwargs)
- def create_volume_from_image(self):
- """Create volume from image"""
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
+ def create_volume_from_image(self, **kwargs):
+ """Create volume from image.
+
+ :param image_id: ID of the image to create volume from,
+ CONF.compute.image_ref by default
+ :param name: name of the volume,
+ '$classname-volume-origin' by default
+ :param **kwargs: additional parameters
+ """
+ image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+ name = kwargs.pop('name', None)
+ if not name:
+ namestart = self.__class__.__name__ + '-volume-origin'
+ name = data_utils.rand_name(namestart)
+ return self.create_volume(name=name, imageRef=image_id, **kwargs)
class NetworkScenarioTest(ScenarioTest):
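And a quick sketch of the reworked create_volume_from_image() helper in use (the image UUID, volume name, and extra size argument are placeholders forwarded to create_volume() via **kwargs):

    # Hypothetical calls inside a ScenarioTest subclass.
    # Defaults: creates the volume from CONF.compute.image_ref with a random
    # '<ClassName>-volume-origin' name.
    volume = self.create_volume_from_image()

    # Overriding the source image and name, and forwarding an extra
    # argument to create_volume().
    volume = self.create_volume_from_image(
        image_id='11111111-2222-3333-4444-555555555555',  # placeholder
        name='scenario-boot-volume',
        size=CONF.volume.volume_size)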
@@ -1154,6 +1193,32 @@
floating_ip['id'])
return floating_ip
+ def associate_floating_ip(self, floating_ip, server):
+ """Associate floating ip
+
+ This wrapper utility associates the given floating_ip with the
+ server's port (looked up via _get_server_port_id_and_ip4).
+ """
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ kwargs = dict(port_id=port_id)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ return floating_ip
+
+ def disassociate_floating_ip(self, floating_ip):
+ """Disassociates floating ip
+
+ This wrapper utility disassociates given floating ip.
+ :param floating_ip: a dict which is a return value of
+ floating_ips_client.create_floatingip method
+ """
+ kwargs = dict(port_id=None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ return floating_ip
+
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
@@ -1357,7 +1422,7 @@
def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
secgroup=None,
security_groups_client=None):
- """Create loginable security group rule
+ """Create loginable security group rule by neutron clients by default.
This function will create:
1. egress and ingress tcp port 22 allow rule in order to allow ssh
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c3b3670..a8e4c30 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
import testtools
from tempest.common import utils
@@ -24,7 +23,6 @@
from tempest.scenario import manager
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class TestStampPattern(manager.ScenarioTest):
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f45eec0..ff74877 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
from tempest.lib.services.volume.v2 import volumes_client
from tempest.tests import base
import tempest.tests.utils as utils
@@ -384,3 +385,54 @@
uuids.attachment_id)
# Assert that show volume is only called once before we return
show_volume.assert_called_once_with(uuids.volume_id)
+
+ def test_wait_for_volume_attachment_remove_from_server(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ volume_not_attached = {"volumeAttachments": []}
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_not_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ waiters.wait_for_volume_attachment_remove_from_server(
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the attachment is
+ # removed.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
+
+ def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove_from_server,
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the attachment is
+ # removed.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
diff --git a/tox.ini b/tox.ini
index d8e059a..80d5de2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,7 +11,7 @@
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/victoria}
-r{toxinidir}/requirements.txt
[testenv]
@@ -28,7 +28,7 @@
install_command = pip install {opts} {packages}
whitelist_externals = *
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/victoria}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
@@ -263,7 +263,7 @@
[testenv:venv]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/victoria}
-r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands = {posargs}
@@ -278,7 +278,7 @@
[testenv:docs]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/victoria}
-r{toxinidir}/doc/requirements.txt
commands =
sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
@@ -363,7 +363,7 @@
[testenv:releasenotes]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/victoria}
-r{toxinidir}/doc/requirements.txt
commands =
rm -rf releasenotes/build