Merge "Decentralize the zuul jobs into zuul.d"
diff --git a/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
new file mode 100644
index 0000000..121e060
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new config option ``[compute-feature-enabled] shelve_migrate``
+ which enables tests for environments that support cold migration of
+ qcow2 unshelved instances.
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
new file mode 100644
index 0000000..9340997
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume.py
@@ -0,0 +1,118 @@
+# Copyright 2020 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseAttachSCSIVolumeTest(base.BaseV2ComputeAdminTest):
+ """Base class for the admin volume tests in this module."""
+ create_default_network = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseAttachSCSIVolumeTest, cls).skip_checks()
+ if not CONF.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
+
+ def _create_image_with_custom_property(self, **kwargs):
+ """Wrapper utility that returns the custom image.
+
+ Creates a new image by downloading the default image's bits and
+ uploading them to a new image. Any kwargs are set as image properties
+ on the new image.
+
+ :return: The UUID of the newly created image.
+ """
+ image = self.image_client.show_image(CONF.compute.image_ref)
+ image_data = self.image_client.show_image_file(
+ CONF.compute.image_ref).data
+ image_file = six.BytesIO(image_data)
+ create_dict = {
+ 'container_format': image['container_format'],
+ 'disk_format': image['disk_format'],
+ 'min_disk': image['min_disk'],
+ 'min_ram': image['min_ram'],
+ 'visibility': 'public',
+ }
+ create_dict.update(kwargs)
+ new_image = self.image_client.create_image(**create_dict)
+ self.addCleanup(self.image_client.wait_for_resource_deletion,
+ new_image['id'])
+ self.addCleanup(self.image_client.delete_image, new_image['id'])
+ self.image_client.store_image_file(new_image['id'], image_file)
+
+ return new_image['id']
+
+
+class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
+ """Test attaching scsi volume to server"""
+
+ @decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
+ def test_attach_scsi_disk_with_config_drive(self):
+ """Test the attach/detach volume with config drive/scsi disk
+
+ Enable the config drive, boot an instance from an image with
+ meta properties hw_cdrom: scsi using virtio-scsi mode, then
+ assert the list of volume attachments on the instance after
+ attach and detach of the volume.
+ """
+ custom_img = self._create_image_with_custom_property(
+ hw_scsi_model='virtio-scsi',
+ hw_disk_bus='scsi',
+ hw_cdrom_bus='scsi')
+ server = self.create_test_server(image_id=custom_img,
+ config_drive=True,
+ wait_until='ACTIVE')
+
+ # NOTE(lyarwood): self.create_test_server deletes the server
+ # at class level cleanup so add server cleanup here to ensure
+ # that the instance is deleted before the created image. This
+ # avoids failures when the rbd backend is used for both
+ # Glance and Nova ephemeral storage. Also wait until server is
+ # deleted otherwise image deletion can start before server is
+ # deleted.
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(self.servers_client.delete_server, server['id'])
+
+ volume = self.create_volume()
+ attachment = self.attach_volume(server, volume)
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'in-use')
+ volume_after_attach = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(1, len(volume_after_attach),
+ "Failed to attach volume")
+ self.servers_client.detach_volume(
+ server['id'], attachment['volumeId'])
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'available')
+ volume_after_detach = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(0, len(volume_after_detach),
+ "Failed to detach volume")
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index d19b4cd..bb0f5ad 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -637,6 +637,7 @@
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
+ cls.image_client = cls.os_admin.image_client_v2
def create_flavor(self, ram, vcpus, disk, name=None,
is_public='True', **kwargs):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 7931ca9..6ebdbdb 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -26,11 +26,6 @@
CONF = config.CONF
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
"""Test novnc console"""
@@ -116,14 +111,14 @@
# single word(4 bytes).
self.assertEqual(
data_length, 4, 'Expected authentication type None.')
- self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
+ self.assertIn(1, [int(data[i]) for i in (0, 3)],
'Expected authentication type None.')
else:
self.assertGreaterEqual(
len(data), 2, 'Expected authentication type None.')
self.assertIn(
1,
- [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+ [int(data[i + 1]) for i in range(int(data[0]))],
'Expected authentication type None.')
# Send to the server that we only support authentication
# type None
@@ -136,7 +131,7 @@
len(data), 4,
'Server did not think security was successful.')
self.assertEqual(
- [ord_func(i) for i in data], [0, 0, 0, 0],
+ [int(i) for i in data], [0, 0, 0, 0],
'Server did not think security was successful.')
# Say to leave the desktop as shared as part of client initialization
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index be13ed7..fc9b1a2 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -182,6 +182,7 @@
self.assertEqual(data, body)
@decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
+ @decorators.skip_because(bug='1905432')
def test_create_object_with_transfer_encoding(self):
"""Test creating object with transfer_encoding"""
object_name = data_utils.rand_name(name='TestObject')
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ff552a1..917262e 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -270,7 +270,7 @@
config.CONF.set_config_path(parsed_args.config_file)
setup_logging()
resources = []
- for count in range(parsed_args.concurrency):
+ for _ in range(parsed_args.concurrency):
# Use N different cred_providers to obtain different
# sets of creds
cred_provider = get_credential_provider(parsed_args)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index d82b6df..8bebce2 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -129,7 +129,6 @@
from cliff import command
from oslo_serialization import jsonutils as json
-import six
from stestr import commands
from tempest import clients
@@ -139,10 +138,6 @@
from tempest.common import credentials_factory as credentials
from tempest import config
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
@@ -167,7 +162,7 @@
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
- if six.PY3 and 'PYTHON' not in os.environ:
+ if 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_stestr_conf(self):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index da3a4a9..42f68f1 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -19,7 +19,6 @@
import struct
import textwrap
-import six
from six.moves.urllib import parse as urlparse
from oslo_log import log as logging
@@ -31,11 +30,6 @@
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -371,8 +365,8 @@
# frames less than 125 bytes here (for the negotiation) and
# that only the 2nd byte contains the length, and since the
# server doesn't do masking, we can just read the data length
- if ord_func(header[1]) & 127 > 0:
- return self._recv(ord_func(header[1]) & 127)
+ if int(header[1]) & 127 > 0:
+ return self._recv(int(header[1]) & 127)
def send_frame(self, data):
"""Wrapper for sending data to add in the WebSocket frame format."""
@@ -389,7 +383,7 @@
frame_bytes.append(mask[i])
# Mask each of the actual data bytes that we are going to send
for i in range(len(data)):
- frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+ frame_bytes.append(int(data[i]) ^ mask[i % 4])
# Convert our integer list to a binary array of bytes
frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
self._socket.sendall(frame_bytes)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 17796df..625e08e 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -207,8 +207,8 @@
time.sleep(client.build_interval)
- message = ('Image %(image_id)s failed to import '
- 'on stores: %s' % str(image['os_glance_failed_import']))
+ message = ('Image %s failed to import on stores: %s' %
+ (image_id, str(image['os_glance_failed_import'])))
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
@@ -238,8 +238,8 @@
time.sleep(client.build_interval)
- message = ('Image %(image_id)s failed to finish the copy operation '
- 'on stores: %s' % str(store_left))
+ message = ('Image %s failed to finish the copy operation '
+ 'on stores: %s' % (image_id, str(store_left)))
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
diff --git a/tempest/config.py b/tempest/config.py
index 26a7fab..3761d8e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -452,6 +452,10 @@
cfg.BoolOpt('shelve',
default=True,
help="Does the test environment support shelving/unshelving?"),
+ cfg.BoolOpt('shelve_migrate',
+ default=False,
+ help="Does the test environment support "
+ "cold migration of unshelved server?"),
cfg.BoolOpt('suspend',
default=True,
help="Does the test environment support suspend/resume?"),
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 3fee489..7c279ab 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -391,7 +391,7 @@
"""
if auth_data is None:
auth_data = self.get_auth()
- token, _auth_data = auth_data
+ _, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
name = filters.get('name')
@@ -524,7 +524,7 @@
"""
if auth_data is None:
auth_data = self.get_auth()
- token, _auth_data = auth_data
+ _, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
name = filters.get('name')
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
import subprocess
from oslo_log import log as logging
-import six
from tempest.lib import base
import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
flags, action, params])
cmd = cmd.strip()
LOG.info("running: '%s'", cmd)
- if six.PY2:
- cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
cmd,
result,
result_err)
- if six.PY2:
- return result
- else:
- return os.fsdecode(result)
+ return os.fsdecode(result)
class CLIClient(object):
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index b34066f..ff09671 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -225,8 +225,7 @@
def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
import_list = [node for node in src_parsed.body
- if isinstance(node, ast.Import) or
- isinstance(node, ast.ImportFrom)]
+ if isinstance(node, (ast.Import, ast.ImportFrom))]
if not import_list:
print("(WARNING) %s: The file is not valid as it does not contain "
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 0513e90..b47b511 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -507,7 +507,7 @@
if not hasattr(body, "keys") or len(body.keys()) != 1:
return body
# Just return the "wrapped" element
- first_key, first_item = six.next(six.iteritems(body))
+ _, first_item = six.next(six.iteritems(body))
if isinstance(first_item, (dict, list)):
return first_item
except (ValueError, IndexError):
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index b47d40d..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,13 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
-if six.PY2:
- # module thread is removed in Python 3
- from thread import get_ident # noqa: H237,F401
-
-else:
- # On Python3 thread module has been deprecated and get_ident has been moved
- # to threading module
- from threading import get_ident # noqa: F401
+# On Python3 thread module has been deprecated and get_ident has been moved
+# to threading module
+from threading import get_ident # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 7f94612..44b55eb 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -129,7 +129,7 @@
:rtype: string
"""
guid = []
- for i in range(8):
+ for _ in range(8):
guid.append("%02x" % random.randint(0x00, 0xff))
return ':'.join(guid)
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index f823b21..2cfb24a 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -110,6 +110,6 @@
def check_group_user_existence(self, group_id, user_id):
"""Check user in group."""
- resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+ resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 3949437..f937ed6 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -51,7 +51,7 @@
def check_user_inherited_project_role_on_domain(
self, domain_id, user_id, role_id):
"""Checks whether a user has an inherited project role on a domain."""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/domains/%s/users/%s/roles/%s/inherited_to_projects"
% (domain_id, user_id, role_id))
self.expected_success(204, resp.status)
@@ -88,7 +88,7 @@
def check_group_inherited_project_role_on_domain(
self, domain_id, group_id, role_id):
"""Checks whether a group has an inherited project role on a domain."""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/domains/%s/groups/%s/roles/%s/inherited_to_projects"
% (domain_id, group_id, role_id))
self.expected_success(204, resp.status)
@@ -115,7 +115,7 @@
def check_user_has_flag_on_inherited_to_project(
self, project_id, user_id, role_id):
"""Check if user has an inherited project role on project"""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
% (project_id, user_id, role_id))
self.expected_success(204, resp.status)
@@ -142,7 +142,7 @@
def check_group_has_flag_on_inherited_to_project(
self, project_id, group_id, role_id):
"""Check if group has an inherited project role on project"""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
% (project_id, group_id, role_id))
self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index 6ca401b..722deca 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -71,7 +71,7 @@
normalized_params = '&'.join(parameter_parts)
# normalize_uri
- scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+ scheme, netloc, path, params, _, _ = urlparse.urlparse(uri)
scheme = scheme.lower()
netloc = netloc.lower()
path = path.replace('//', '/')
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index f9356be..0d7593a 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -122,16 +122,16 @@
def check_user_role_existence_on_project(self, project_id,
user_id, role_id):
"""Check role of a user on a project."""
- resp, body = self.head('projects/%s/users/%s/roles/%s' %
- (project_id, user_id, role_id))
+ resp, _ = self.head('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def check_user_role_existence_on_domain(self, domain_id,
user_id, role_id):
"""Check role of a user on a domain."""
- resp, body = self.head('domains/%s/users/%s/roles/%s' %
- (domain_id, user_id, role_id))
+ resp, _ = self.head('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
@@ -182,16 +182,16 @@
def check_role_from_group_on_project_existence(self, project_id,
group_id, role_id):
"""Check role of a group on a project."""
- resp, body = self.head('projects/%s/groups/%s/roles/%s' %
- (project_id, group_id, role_id))
+ resp, _ = self.head('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def check_role_from_group_on_domain_existence(self, domain_id,
group_id, role_id):
"""Check role of a group on a domain."""
- resp, body = self.head('domains/%s/groups/%s/roles/%s' %
- (domain_id, group_id, role_id))
+ resp, _ = self.head('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
@@ -232,14 +232,14 @@
def check_role_inference_rule(self, prior_role, implies_role):
"""Check a role inference rule."""
- resp, body = self.head('roles/%s/implies/%s' %
- (prior_role, implies_role))
+ resp, _ = self.head('roles/%s/implies/%s' %
+ (prior_role, implies_role))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_role_inference_rule(self, prior_role, implies_role):
"""Delete a role inference rule."""
- resp, body = self.delete('roles/%s/implies/%s' %
- (prior_role, implies_role))
+ resp, _ = self.delete('roles/%s/implies/%s' %
+ (prior_role, implies_role))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 4ed5eb1..2efb0da 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -302,5 +302,5 @@
def retype_volume(self, volume_id, **kwargs):
"""Updates volume with new volume type."""
post_body = json.dumps({'os-retype': kwargs})
- resp, body = self.post('volumes/%s/action' % volume_id, post_body)
+ resp, _ = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ff860d5..3c37b70 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -160,7 +160,7 @@
client.delete_port, port['id'])
return port
- def create_keypair(self, client=None):
+ def create_keypair(self, client=None, **kwargs):
"""Creates keypair
Keypair is a public key of OpenSSH key pair used for accessing
@@ -170,10 +170,11 @@
"""
if not client:
client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
+ if not kwargs.get('name'):
+ kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
+ body = client.create_keypair(**kwargs)
+ self.addCleanup(client.delete_keypair, kwargs['name'])
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
@@ -306,7 +307,7 @@
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
- imageRef=None, volume_type=None):
+ imageRef=None, volume_type=None, **kwargs):
"""Creates volume
This wrapper utility creates volume and waits for volume to be
@@ -326,11 +327,11 @@
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
- kwargs = {'display_name': name,
- 'snapshot_id': snapshot_id,
- 'imageRef': imageRef,
- 'volume_type': volume_type,
- 'size': size}
+ kwargs.update({'name': name,
+ 'snapshot_id': snapshot_id,
+ 'imageRef': imageRef,
+ 'volume_type': volume_type,
+ 'size': size})
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
@@ -422,7 +423,7 @@
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
- display_name=name,
+ name=name,
description=description,
metadata=metadata)['snapshot']
@@ -625,7 +626,7 @@
LOG.debug("image:%s", image['id'])
return image['id']
- def _log_console_output(self, servers=None, client=None):
+ def _log_console_output(self, servers=None, client=None, **kwargs):
"""Console log output"""
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
@@ -637,7 +638,7 @@
for server in servers:
try:
console_output = client.get_console_output(
- server['id'])['output']
+ server['id'], **kwargs)['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
@@ -697,17 +698,20 @@
image_name, server['name'])
return snapshot_image
- def nova_volume_attach(self, server, volume_to_attach):
+ def nova_volume_attach(self, server, volume_to_attach, **kwargs):
"""Compute volume attach
This utility attaches volume from compute and waits for the
volume status to be 'in-use' state.
"""
volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
+ server['id'], volumeId=volume_to_attach['id'],
+ **kwargs)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.nova_volume_detach, server, volume)
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
@@ -810,13 +814,15 @@
LOG.exception(extra_msg)
raise
- def create_floating_ip(self, server, pool_name=None):
+ def create_floating_ip(self, server, pool_name=None, **kwargs):
"""Create a floating IP and associates to a server on Nova"""
if not pool_name:
pool_name = CONF.network.floating_network_name
+
floating_ip = (self.compute_floating_ips_client.
- create_floating_ip(pool=pool_name)['floating_ip'])
+ create_floating_ip(pool=pool_name,
+ **kwargs)['floating_ip'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
@@ -825,18 +831,20 @@
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None,
+ fs='ext4'):
"""Creates timestamp
This wrapper utility does ssh, creates timestamp and returns the
created timestamp.
"""
-
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
- ssh_client.make_fs(dev_name)
+ ssh_client.make_fs(dev_name, fs=fs)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
mount_path))
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
@@ -865,18 +873,22 @@
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
- def get_server_ip(self, server):
+ def get_server_ip(self, server, **kwargs):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
+
+ If CONF.validation.connect_method is floating, then
+ a floating ip will be created passing kwargs as additional
+ argument.
"""
if CONF.validation.connect_method == 'floating':
# The tests calling this method don't have a floating IP
# and can't make use of the validation resources. So the
# method is creating the floating IP there.
- return self.create_floating_ip(server)['ip']
+ return self.create_floating_ip(server, **kwargs)['ip']
elif CONF.validation.connect_method == 'fixed':
# Determine the network name to look for based on config or creds
# provider network resources.
@@ -916,14 +928,14 @@
keypair=None,
security_group=None,
delete_on_termination=False,
- name=None):
+ name=None, **kwargs):
"""Boot instance from resource
This wrapper utility boots instance from resource with block device
mapping with source info passed in arguments
"""
- create_kwargs = dict()
+ create_kwargs = dict({'image_id': ''})
if keypair:
create_kwargs['key_name'] = keypair['name']
if security_group:
@@ -935,8 +947,9 @@
delete_on_termination=delete_on_termination))
if name:
create_kwargs['name'] = name
+ create_kwargs.update(kwargs)
- return self.create_server(image_id='', **create_kwargs)
+ return self.create_server(**create_kwargs)
def create_volume_from_image(self):
"""Create volume from image"""
@@ -1063,14 +1076,13 @@
return subnet
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- if ip_addr:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'],
- fixed_ips='ip_address=%s' % ip_addr)['ports']
- else:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'])['ports']
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
+
+ if ip_addr and not kwargs.get('fixed_ips'):
+ kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], **kwargs)['ports']
+
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
@@ -1109,7 +1121,7 @@
return net[0]
def create_floating_ip(self, server, external_network_id=None,
- port_id=None, client=None):
+ port_id=None, client=None, **kwargs):
"""Create a floating IP and associates to a resource/port on Neutron"""
if not external_network_id:
@@ -1121,15 +1133,17 @@
else:
ip4 = None
- kwargs = {
+ floatingip_kwargs = {
'floating_network_id': external_network_id,
'port_id': port_id,
'tenant_id': server.get('project_id') or server['tenant_id'],
'fixed_ip_address': ip4,
}
if CONF.network.subnet_id:
- kwargs['subnet_id'] = CONF.network.subnet_id
- result = client.create_floatingip(**kwargs)
+ floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+ floatingip_kwargs.update(kwargs)
+ result = client.create_floatingip(**floatingip_kwargs)
floating_ip = result['floatingip']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index b515639..58e234f 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -51,10 +51,27 @@
return aggregate
def _get_host_name(self):
+ # Find a host that has not been added to another availability zone,
+ # since one host can't be added to different availability zones.
svc_list = self.services_client.list_services(
binary='nova-compute')['services']
self.assertNotEmpty(svc_list)
- return svc_list[0]['host']
+ hosts_available = []
+ for host in svc_list:
+ if (host['state'] == 'up' and host['status'] == 'enabled'):
+ hosts_available.append(host['host'])
+ aggregates = self.aggregates_client.list_aggregates()['aggregates']
+ hosts_in_zone = []
+ for agg in aggregates:
+ if agg['availability_zone']:
+ hosts_in_zone.extend(agg['hosts'])
+ hosts = [v for v in hosts_available if v not in hosts_in_zone]
+ if not hosts:
+ raise self.skipException("All hosts are already in other "
+ "availability zones, so can't add "
+ "host to aggregate. \nAggregates list: "
+ "%s" % aggregates)
+ return hosts[0]
def _add_host(self, aggregate_id, host):
aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..ed06898 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -33,9 +33,18 @@
* shelve the instance
* unshelve the instance
* check the existence of the timestamp file in the unshelved instance
+ * check the existence of the timestamp file in the unshelved instance
+ after a cold migration
"""
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def setup_clients(cls):
+ super(TestShelveInstance, cls).setup_clients()
+ cls.admin_servers_client = cls.os_admin.servers_client
+
@classmethod
def skip_checks(cls):
super(TestShelveInstance, cls).skip_checks()
@@ -50,7 +59,21 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
- def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
+ def _cold_migrate_server(self, server):
+ src_host = self.get_host_for_server(server['id'])
+
+ self.admin_servers_client.migrate_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ dst_host = self.get_host_for_server(server['id'])
+ self.assertNotEqual(src_host, dst_host)
+
+ def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False,
+ cold_migrate=False):
keypair = self.create_keypair()
security_group = self._create_security_group()
@@ -71,6 +94,10 @@
# with the instance snapshot
self._shelve_then_unshelve_server(server)
+ if cold_migrate:
+ # Prevent bug #1732428 from coming back
+ self._cold_migrate_server(server)
+
timestamp2 = self.get_timestamp(instance_ip,
private_key=keypair['private_key'],
server=server)
@@ -91,3 +118,18 @@
@utils.services('compute', 'volume', 'network', 'image')
def test_shelve_volume_backed_instance(self):
self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('1295fd9e-193a-4cf8-b211-55358e021bae')
+ @testtools.skipUnless(CONF.network.public_network_id,
+ 'The public_network_id option must be specified.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.shelve_migrate,
+ 'Shelve migrate not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @utils.services('compute', 'network', 'image')
+ def test_cold_migrate_unshelved_instance(self):
+ self._create_server_then_shelve_and_unshelve(cold_migrate=True)
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 5d9ddfa..3c99bbe 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -29,10 +29,6 @@
from tempest.lib.common.utils import data_utils
from tempest.tests import base
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
@@ -149,8 +145,7 @@
]
# NOTE(mtreinish): on python 3 the subprocess prints b'' around
# stdout.
- if six.PY3:
- result = ["b\'" + x + "\'" for x in result]
+ result = ["b\'" + x + "\'" for x in result]
self.assertEqual(result, tests)
def test_tempest_run_with_worker_file(self):
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f03c7cc..f45eec0 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -55,6 +55,56 @@
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
+ def test_wait_for_image_imported_to_stores(self):
+ self.client.show_image.return_value = ({'status': 'active',
+ 'stores': 'fake_store'})
+ start_time = int(time.time())
+ waiters.wait_for_image_imported_to_stores(
+ self.client, 'fake_image_id', 'fake_store')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_imported_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ client = mock.MagicMock()
+ client.build_timeout = 2
+ self.patch('time.time', side_effect=[0., 1., 2.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ client.show_image.return_value = ({
+ 'status': 'saving',
+ 'stores': 'fake_store',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_imported_to_stores,
+ client, 'fake_image_id', 'fake_store')
+
+ def test_wait_for_image_copied_to_stores(self):
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': '',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ start_time = int(time.time())
+ waiters.wait_for_image_copied_to_stores(
+ self.client, 'fake_image_id')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_copied_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ self.patch('time.time', side_effect=[0., 1.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': 'processing',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_copied_to_stores,
+ self.client, 'fake_image_id')
+
class TestInterfaceWaiters(base.TestCase):