1# TODO: Remove this file when tempest scenario manager becomes stable
2# Copyright 2012 OpenStack Foundation
3# Copyright 2013 IBM Corp.
4# All Rights Reserved.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17
18import netaddr
19from oslo_log import log
20from oslo_serialization import jsonutils as json
21from oslo_utils import netutils
22
23from tempest.common import compute
24from tempest.common import image as common_image
25from tempest.common.utils.linux import remote_client
26from tempest.common import waiters
27from tempest import config
28from tempest import exceptions
29from tempest.lib.common import api_microversion_fixture
30from tempest.lib.common import api_version_utils
31from tempest.lib.common.utils import data_utils
32from tempest.lib.common.utils import test_utils
33from tempest.lib import exceptions as lib_exc
34import tempest.test
35
36CONF = config.CONF
37
38LOG = log.getLogger(__name__)
39
40LATEST_MICROVERSION = 'latest'
41
42
43class ScenarioTest(tempest.test.BaseTestCase):
44 """Base class for scenario tests. Uses tempest own clients. """
45
46 credentials = ['primary']
47
48 compute_min_microversion = None
49 compute_max_microversion = LATEST_MICROVERSION
50 volume_min_microversion = None
51 volume_max_microversion = LATEST_MICROVERSION
52 placement_min_microversion = None
53 placement_max_microversion = LATEST_MICROVERSION
54
55 @classmethod
56 def skip_checks(cls):
57 super(ScenarioTest, cls).skip_checks()
58 api_version_utils.check_skip_with_microversion(
59 cls.compute_min_microversion, cls.compute_max_microversion,
60 CONF.compute.min_microversion, CONF.compute.max_microversion)
61 api_version_utils.check_skip_with_microversion(
62 cls.volume_min_microversion, cls.volume_max_microversion,
63 CONF.volume.min_microversion, CONF.volume.max_microversion)
64 api_version_utils.check_skip_with_microversion(
65 cls.placement_min_microversion, cls.placement_max_microversion,
66 CONF.placement.min_microversion, CONF.placement.max_microversion)
67
68 @classmethod
69 def resource_setup(cls):
70 super(ScenarioTest, cls).resource_setup()
71 cls.compute_request_microversion = (
72 api_version_utils.select_request_microversion(
73 cls.compute_min_microversion,
74 CONF.compute.min_microversion))
75 cls.volume_request_microversion = (
76 api_version_utils.select_request_microversion(
77 cls.volume_min_microversion,
78 CONF.volume.min_microversion))
79 cls.placement_request_microversion = (
80 api_version_utils.select_request_microversion(
81 cls.placement_min_microversion,
82 CONF.placement.min_microversion))
83
84 def setUp(self):
85 super(ScenarioTest, self).setUp()
86 self.useFixture(api_microversion_fixture.APIMicroversionFixture(
87 compute_microversion=self.compute_request_microversion,
88 volume_microversion=self.volume_request_microversion,
89 placement_microversion=self.placement_request_microversion))
90
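# Illustrative sketch (hypothetical values, not part of this module): a
# subclass that needs a specific API microversion pins it through the class
# attributes consumed by skip_checks()/resource_setup() above, e.g.:
#
#     class MyVolumeScenarioTest(ScenarioTest):
#         compute_min_microversion = '2.60'
#         compute_max_microversion = 'latest'
#
# The APIMicroversionFixture in setUp() then sends the negotiated versions
# with every compute/volume/placement request made by the test.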
91 @classmethod
92 def setup_clients(cls):
93 super(ScenarioTest, cls).setup_clients()
94 # Clients (in alphabetical order)
95 cls.flavors_client = cls.os_primary.flavors_client
96 cls.compute_floating_ips_client = (
97 cls.os_primary.compute_floating_ips_client)
98 if CONF.service_available.glance:
99 # Check if glance v1 is available to determine which client to use.
100 if CONF.image_feature_enabled.api_v1:
101 cls.image_client = cls.os_primary.image_client
102 elif CONF.image_feature_enabled.api_v2:
103 cls.image_client = cls.os_primary.image_client_v2
104 else:
105 raise lib_exc.InvalidConfiguration(
106 'Either api_v1 or api_v2 must be True in '
107 '[image-feature-enabled].')
108 # Compute image client
109 cls.compute_images_client = cls.os_primary.compute_images_client
110 cls.keypairs_client = cls.os_primary.keypairs_client
111 # Nova security groups client
112 cls.compute_security_groups_client = (
113 cls.os_primary.compute_security_groups_client)
114 cls.compute_security_group_rules_client = (
115 cls.os_primary.compute_security_group_rules_client)
116 cls.servers_client = cls.os_primary.servers_client
117 cls.interface_client = cls.os_primary.interfaces_client
118 # Neutron network client
119 cls.networks_client = cls.os_primary.networks_client
120 cls.ports_client = cls.os_primary.ports_client
121 cls.routers_client = cls.os_primary.routers_client
122 cls.subnets_client = cls.os_primary.subnets_client
123 cls.floating_ips_client = cls.os_primary.floating_ips_client
124 cls.security_groups_client = cls.os_primary.security_groups_client
125 cls.security_group_rules_client = (
126 cls.os_primary.security_group_rules_client)
127 # Use the latest available volume clients
128 if CONF.service_available.cinder:
129 cls.volumes_client = cls.os_primary.volumes_client_latest
130 cls.snapshots_client = cls.os_primary.snapshots_client_latest
131 cls.backups_client = cls.os_primary.backups_client_latest
132
133 # ## Test functions library
134 #
135 # The create_[resource] functions only return the response body and
136 # discard the resp object, which is not used in scenario tests.
137
138 def create_keypair(self, client=None):
139 if not client:
140 client = self.keypairs_client
141 name = data_utils.rand_name(self.__class__.__name__)
142 # We don't need to import an existing public key to create a keypair
143 # in scenario tests.
143 body = client.create_keypair(name=name)
144 self.addCleanup(client.delete_keypair, name)
145 return body['keypair']
146
147 def create_server(self, name=None, image_id=None, flavor=None,
148 validatable=False, wait_until='ACTIVE',
149 clients=None, **kwargs):
150 """Wrapper utility that returns a test server.
151
152 This wrapper calls the common create_test_server helper and returns
153 the created server. Its purpose is to minimize the impact on the
154 code of the tests already using this function.
156
157 :param **kwargs:
158 See extra parameters below
159
160 :Keyword Arguments:
161 * *vnic_type* (``string``) --
162 used when launching instances with pre-configured ports.
163 Examples:
164 normal: a traditional virtual port that is either attached
165 to a Linux bridge or an Open vSwitch bridge on a
166 compute node.
167 direct: an SR-IOV port that is directly attached to a VM.
168 macvtap: an SR-IOV port that is attached to a VM via a macvtap
169 device.
170 Defaults to ``CONF.network.port_vnic_type``.
171 * *port_profile* (``dict``) --
172 This attribute is a dictionary that can be used (with admin
173 credentials) to supply information influencing the binding of
174 the port.
175 example: port_profile = "capabilities:[switchdev]"
176 Defaults to ``CONF.network.port_profile``.
177 """
178
179 # NOTE(jlanoux): As a first step, ssh checks in the scenario
180 # tests need to be run regardless of the run_validation and
181 # validatable parameters, at least until the ssh validation job
182 # becomes voting in CI. The test resources management and IP
183 # association are taken care of in the scenario tests.
184 # Therefore, the validatable parameter is set to false in all
185 # those tests. In this way create_server just returns a standard
186 # server and the scenario tests always perform ssh checks.
187
188 # Needed for the cross_tenant_traffic test:
189 if clients is None:
190 clients = self.os_primary
191
192 if name is None:
193 name = data_utils.rand_name(self.__class__.__name__ + "-server")
194
195 vnic_type = kwargs.pop('vnic_type', CONF.network.port_vnic_type)
196 profile = kwargs.pop('port_profile', CONF.network.port_profile)
197
198 # If vnic_type or profile is configured, create a port for
199 # every network
200 if vnic_type or profile:
201 ports = []
202 create_port_body = {}
203
204 if vnic_type:
205 create_port_body['binding:vnic_type'] = vnic_type
206
207 if profile:
208 create_port_body['binding:profile'] = profile
209
210 if kwargs:
211 # Convert security group names to security group ids
212 # to pass to create_port
213 if 'security_groups' in kwargs:
214 security_groups = \
215 clients.security_groups_client.list_security_groups(
216 ).get('security_groups')
217 sec_dict = dict([(s['name'], s['id'])
218 for s in security_groups])
219
220 sec_groups_names = [s['name'] for s in kwargs.pop(
221 'security_groups')]
222 security_groups_ids = [sec_dict[s]
223 for s in sec_groups_names]
224
225 if security_groups_ids:
226 create_port_body[
227 'security_groups'] = security_groups_ids
228 networks = kwargs.pop('networks', [])
229 else:
230 networks = []
231
232 # If no networks are passed to us, we look up the project's
233 # private networks and create a port on each of them. This is
234 # the same behaviour we would expect when passing the call to
235 # the clients with no networks.
236 if not networks:
237 networks = clients.networks_client.list_networks(
238 **{'router:external': False, 'fields': 'id'})['networks']
239
240 # It's net['uuid'] if networks come from kwargs
241 # and net['id'] if they come from
242 # clients.networks_client.list_networks
243 for net in networks:
244 net_id = net.get('uuid', net.get('id'))
245 if 'port' not in net:
246 port = self.create_port(network_id=net_id,
247 client=clients.ports_client,
248 **create_port_body)
249 ports.append({'port': port['id']})
250 else:
251 ports.append({'port': net['port']})
252 if ports:
253 kwargs['networks'] = ports
254 self.ports = ports
255
256 tenant_network = self.get_tenant_network()
257
258 if CONF.compute.compute_volume_common_az:
259 kwargs.setdefault('availability_zone',
260 CONF.compute.compute_volume_common_az)
261
262 body, _ = compute.create_test_server(
263 clients,
264 tenant_network=tenant_network,
265 wait_until=wait_until,
266 name=name, flavor=flavor,
267 image_id=image_id, **kwargs)
268
269 self.addCleanup(waiters.wait_for_server_termination,
270 clients.servers_client, body['id'])
271 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
272 clients.servers_client.delete_server, body['id'])
273 server = clients.servers_client.show_server(body['id'])['server']
274 return server
275
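# Illustrative usage sketch (the 'network' variable and flavor/image refs are
# assumptions coming from CONF and test setup, not from this helper): booting
# a guest on a pre-created SR-IOV port could look like:
#
#     server = self.create_server(
#         flavor=CONF.compute.flavor_ref,
#         image_id=CONF.compute.image_ref,
#         vnic_type='direct',
#         networks=[{'uuid': network['id']}],
#         key_name=self.create_keypair()['name'])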
276 def create_volume(self, size=None, name=None, snapshot_id=None,
277 imageRef=None, volume_type=None):
278 if size is None:
279 size = CONF.volume.volume_size
280 if imageRef:
281 if CONF.image_feature_enabled.api_v1:
282 resp = self.image_client.check_image(imageRef)
283 image = common_image.get_image_meta_from_headers(resp)
284 else:
285 image = self.image_client.show_image(imageRef)
286 min_disk = image.get('min_disk')
287 size = max(size, min_disk)
288 if name is None:
289 name = data_utils.rand_name(self.__class__.__name__ + "-volume")
290 kwargs = {'display_name': name,
291 'snapshot_id': snapshot_id,
292 'imageRef': imageRef,
293 'volume_type': volume_type,
294 'size': size}
295
296 if CONF.compute.compute_volume_common_az:
297 kwargs.setdefault('availability_zone',
298 CONF.compute.compute_volume_common_az)
299
300 volume = self.volumes_client.create_volume(**kwargs)['volume']
301
302 self.addCleanup(self.volumes_client.wait_for_resource_deletion,
303 volume['id'])
304 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
305 self.volumes_client.delete_volume, volume['id'])
306 self.assertEqual(name, volume['name'])
307 waiters.wait_for_volume_resource_status(self.volumes_client,
308 volume['id'], 'available')
309 # The volume retrieved on creation has a non-up-to-date status.
310 # Retrieving it again once it is available ensures correct details.
311 volume = self.volumes_client.show_volume(volume['id'])['volume']
312 return volume
313
314 def create_backup(self, volume_id, name=None, description=None,
315 force=False, snapshot_id=None, incremental=False,
316 container=None):
317
318 name = name or data_utils.rand_name(
319 self.__class__.__name__ + "-backup")
320 kwargs = {'name': name,
321 'description': description,
322 'force': force,
323 'snapshot_id': snapshot_id,
324 'incremental': incremental,
325 'container': container}
326 backup = self.backups_client.create_backup(volume_id=volume_id,
327 **kwargs)['backup']
328 self.addCleanup(self.backups_client.delete_backup, backup['id'])
329 waiters.wait_for_volume_resource_status(self.backups_client,
330 backup['id'], 'available')
331 return backup
332
333 def restore_backup(self, backup_id):
334 restore = self.backups_client.restore_backup(backup_id)['restore']
335 self.addCleanup(self.volumes_client.delete_volume,
336 restore['volume_id'])
337 waiters.wait_for_volume_resource_status(self.backups_client,
338 backup_id, 'available')
339 waiters.wait_for_volume_resource_status(self.volumes_client,
340 restore['volume_id'],
341 'available')
342 self.assertEqual(backup_id, restore['backup_id'])
343 return restore
344
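# Illustrative sketch (volume size and names are assumptions): a typical
# backup scenario pairs these helpers as follows:
#
#     volume = self.create_volume()
#     backup = self.create_backup(volume_id=volume['id'])
#     restore = self.restore_backup(backup['id'])
#     restored_volume = self.volumes_client.show_volume(
#         restore['volume_id'])['volume']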
345 def create_volume_snapshot(self, volume_id, name=None, description=None,
346 metadata=None, force=False):
347 name = name or data_utils.rand_name(
348 self.__class__.__name__ + '-snapshot')
349 snapshot = self.snapshots_client.create_snapshot(
350 volume_id=volume_id,
351 force=force,
352 display_name=name,
353 description=description,
354 metadata=metadata)['snapshot']
355 self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
356 snapshot['id'])
357 self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
358 waiters.wait_for_volume_resource_status(self.snapshots_client,
359 snapshot['id'], 'available')
360 snapshot = self.snapshots_client.show_snapshot(
361 snapshot['id'])['snapshot']
362 return snapshot
363
364 def _cleanup_volume_type(self, volume_type):
365 """Clean up a given volume type.
366
367 Ensure all volumes associated with the type are removed before
368 attempting to remove the type itself. This includes any image volume
369 cache volumes stored in a separate tenant from the original volumes
370 created from the type.
371 """
372 admin_volume_type_client = self.os_admin.volume_types_client_latest
373 admin_volumes_client = self.os_admin.volumes_client_latest
374 volumes = admin_volumes_client.list_volumes(
375 detail=True, params={'all_tenants': 1})['volumes']
376 type_name = volume_type['name']
377 for volume in [v for v in volumes if v['volume_type'] == type_name]:
378 test_utils.call_and_ignore_notfound_exc(
379 admin_volumes_client.delete_volume, volume['id'])
380 admin_volumes_client.wait_for_resource_deletion(volume['id'])
381 admin_volume_type_client.delete_volume_type(volume_type['id'])
382
383 def create_volume_type(self, client=None, name=None, backend_name=None):
384 if not client:
385 client = self.os_admin.volume_types_client_latest
386 if not name:
387 class_name = self.__class__.__name__
388 name = data_utils.rand_name(class_name + '-volume-type')
389 randomized_name = data_utils.rand_name('scenario-type-' + name)
390
391 LOG.debug("Creating a volume type: %s on backend %s",
392 randomized_name, backend_name)
393 extra_specs = {}
394 if backend_name:
395 extra_specs = {"volume_backend_name": backend_name}
396
397 volume_type = client.create_volume_type(
398 name=randomized_name, extra_specs=extra_specs)['volume_type']
399 self.addCleanup(self._cleanup_volume_type, volume_type)
400 return volume_type
401
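# Illustrative sketch (the backend name is an assumption taken from a
# hypothetical deployment): creating a type bound to a specific backend and
# then a volume of that type could look like:
#
#     volume_type = self.create_volume_type(backend_name='lvmdriver-1')
#     volume = self.create_volume(volume_type=volume_type['name'])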
402 def _create_loginable_secgroup_rule(self, secgroup_id=None):
403 _client = self.compute_security_groups_client
404 _client_rules = self.compute_security_group_rules_client
405 if secgroup_id is None:
406 sgs = _client.list_security_groups()['security_groups']
407 for sg in sgs:
408 if sg['name'] == 'default':
409 secgroup_id = sg['id']
410
411 # These rules are intended to permit inbound ssh and icmp
412 # traffic from all sources, so no group_id is provided.
413 # Setting a group_id would only permit traffic from ports
414 # belonging to the same security group.
415 rulesets = [
416 {
417 # ssh
418 'ip_protocol': 'tcp',
419 'from_port': 22,
420 'to_port': 22,
421 'cidr': '0.0.0.0/0',
422 },
423 {
424 # ping
425 'ip_protocol': 'icmp',
426 'from_port': -1,
427 'to_port': -1,
428 'cidr': '0.0.0.0/0',
429 }
430 ]
431 rules = list()
432 for ruleset in rulesets:
433 sg_rule = _client_rules.create_security_group_rule(
434 parent_group_id=secgroup_id, **ruleset)['security_group_rule']
435 rules.append(sg_rule)
436 return rules
437
438 def _create_security_group(self):
439 # Create security group
440 sg_name = data_utils.rand_name(self.__class__.__name__)
441 sg_desc = sg_name + " description"
442 secgroup = self.compute_security_groups_client.create_security_group(
443 name=sg_name, description=sg_desc)['security_group']
444 self.assertEqual(secgroup['name'], sg_name)
445 self.assertEqual(secgroup['description'], sg_desc)
446 self.addCleanup(
447 test_utils.call_and_ignore_notfound_exc,
448 self.compute_security_groups_client.delete_security_group,
449 secgroup['id'])
450
451 # Add rules to the security group
452 self._create_loginable_secgroup_rule(secgroup['id'])
453
454 return secgroup
455
456 def get_remote_client(self, ip_address, username=None, private_key=None,
457 server=None):
458 """Get a SSH client to a remote server
459
460 :param ip_address: the server floating or fixed IP address to use
461 for ssh validation
462 :param username: name of the Linux account on the remote server
463 :param private_key: the SSH private key to use
464 :param server: server dict, used for debugging purposes
465 :return: a RemoteClient object
466 """
467
468 if username is None:
469 username = CONF.validation.image_ssh_user
470 # CONF.validation.auth_method is set to 'keypair' to log in with an
471 # SSH keypair, or to another value to log in with username/password.
472 if CONF.validation.auth_method == 'keypair':
473 password = None
474 if private_key is None:
475 private_key = self.keypair['private_key']
476 else:
477 password = CONF.validation.image_ssh_password
478 private_key = None
479 linux_client = remote_client.RemoteClient(
480 ip_address, username, pkey=private_key, password=password,
481 server=server, servers_client=self.servers_client)
482 linux_client.validate_authentication()
483 return linux_client
484
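# Illustrative sketch (assumes the keypair auth_method and a floating IP
# network are configured): the usual way to reach a freshly booted guest is:
#
#     self.keypair = self.create_keypair()
#     server = self.create_server(key_name=self.keypair['name'])
#     ip = self.get_server_ip(server)
#     ssh_client = self.get_remote_client(
#         ip, private_key=self.keypair['private_key'], server=server)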
485 def _log_net_info(self, exc):
486 # network debug is called as part of ssh init
487 if not isinstance(exc, lib_exc.SSHTimeout):
488 LOG.debug('Network information on a devstack host')
489
490 def create_server_snapshot(self, server, name=None):
491 # Glance client
492 _image_client = self.image_client
493 # Compute client
494 _images_client = self.compute_images_client
495 if name is None:
496 name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
497 LOG.debug("Creating a snapshot image for server: %s", server['name'])
498 image = _images_client.create_image(server['id'], name=name)
499 image_id = image.response['location'].split('images/')[1]
500 waiters.wait_for_image_status(_image_client, image_id, 'active')
501
502 self.addCleanup(_image_client.wait_for_resource_deletion,
503 image_id)
504 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
505 _image_client.delete_image, image_id)
506
507 if CONF.image_feature_enabled.api_v1:
508 # In glance v1 the additional properties are stored in the headers.
509 resp = _image_client.check_image(image_id)
510 snapshot_image = common_image.get_image_meta_from_headers(resp)
511 image_props = snapshot_image.get('properties', {})
512 else:
513 # In glance v2 the additional properties are flattened.
514 snapshot_image = _image_client.show_image(image_id)
515 image_props = snapshot_image
516
517 bdm = image_props.get('block_device_mapping')
518 if bdm:
519 bdm = json.loads(bdm)
520 if bdm and 'snapshot_id' in bdm[0]:
521 snapshot_id = bdm[0]['snapshot_id']
522 self.addCleanup(
523 self.snapshots_client.wait_for_resource_deletion,
524 snapshot_id)
525 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
526 self.snapshots_client.delete_snapshot,
527 snapshot_id)
528 waiters.wait_for_volume_resource_status(self.snapshots_client,
529 snapshot_id,
530 'available')
531 image_name = snapshot_image['name']
532 self.assertEqual(name, image_name)
533 LOG.debug("Created snapshot image %s for server %s",
534 image_name, server['name'])
535 return snapshot_image
536
537 def nova_volume_attach(self, server, volume_to_attach):
538 volume = self.servers_client.attach_volume(
539 server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
540 % CONF.compute.volume_device_name)['volumeAttachment']
541 self.assertEqual(volume_to_attach['id'], volume['id'])
542 waiters.wait_for_volume_resource_status(self.volumes_client,
543 volume['id'], 'in-use')
544
545 # Return the updated volume after the attachment
546 return self.volumes_client.show_volume(volume['id'])['volume']
547
548 def nova_volume_detach(self, server, volume):
549 self.servers_client.detach_volume(server['id'], volume['id'])
550 waiters.wait_for_volume_resource_status(self.volumes_client,
551 volume['id'], 'available')
552
553 def check_vm_connectivity(self, ip_address,
554 username=None,
555 private_key=None,
556 should_connect=True,
557 extra_msg="",
558 server=None,
559 mtu=None):
560 """Check server connectivity
561
562 :param ip_address: server to test against
563 :param username: server's ssh username
564 :param private_key: server's ssh private key to be used
565 :param should_connect: True/False indicates positive/negative test
566 positive - attempt ping and ssh
567 negative - attempt ping and fail if it succeeds
568 :param extra_msg: Message to help with debugging if ``ping_ip_address``
569 fails
570 :param server: The server whose console to log for debugging
571 :param mtu: network MTU to use for connectivity validation
572
573 :raises: AssertionError if the result of the connectivity check does
574 not match the value of the should_connect param
575 """
576 LOG.debug('checking network connections to IP %s with user: %s',
577 ip_address, username)
578 if should_connect:
579 msg = "Timed out waiting for %s to become reachable" % ip_address
580 else:
581 msg = "ip address %s is reachable" % ip_address
582 if extra_msg:
583 msg = "%s\n%s" % (extra_msg, msg)
584 self.assertTrue(self.ping_ip_address(ip_address,
585 should_succeed=should_connect,
586 mtu=mtu, server=server),
587 msg=msg)
588 if should_connect:
589 # no need to check ssh for negative connectivity
590 try:
591 self.get_remote_client(ip_address, username, private_key,
592 server=server)
593 except Exception:
594 if not extra_msg:
595 extra_msg = 'Failed to ssh to %s' % ip_address
596 LOG.exception(extra_msg)
597 raise
598
599 def create_floating_ip(self, thing, pool_name=None):
600 """Create a floating IP and associates to a server on Nova"""
601
602 if not pool_name:
603 pool_name = CONF.network.floating_network_name
604 floating_ip = (self.compute_floating_ips_client.
605 create_floating_ip(pool=pool_name)['floating_ip'])
606 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
607 self.compute_floating_ips_client.delete_floating_ip,
608 floating_ip['id'])
609 self.compute_floating_ips_client.associate_floating_ip_to_server(
610 floating_ip['ip'], thing['id'])
611 return floating_ip
612
613 def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
614 private_key=None, server=None):
615 ssh_client = self.get_remote_client(ip_address,
616 private_key=private_key,
617 server=server)
618 if dev_name is not None:
619 ssh_client.make_fs(dev_name)
620 ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
621 mount_path))
622 cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
623 ssh_client.exec_command(cmd_timestamp)
624 timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
625 % mount_path)
626 if dev_name is not None:
627 ssh_client.exec_command('sudo umount %s' % mount_path)
628 return timestamp
629
630 def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
631 private_key=None, server=None):
632 ssh_client = self.get_remote_client(ip_address,
633 private_key=private_key,
634 server=server)
635 if dev_name is not None:
636 ssh_client.mount(dev_name, mount_path)
637 timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
638 % mount_path)
639 if dev_name is not None:
640 ssh_client.exec_command('sudo umount %s' % mount_path)
641 return timestamp
642
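# Illustrative sketch (device name and the disruptive operation are
# assumptions): create_timestamp()/get_timestamp() are meant to verify data
# persistence across an operation such as a snapshot, migration or reboot:
#
#     timestamp = self.create_timestamp(ip, private_key=pkey, server=server)
#     # ... perform the disruptive operation under test ...
#     self.assertEqual(
#         timestamp,
#         self.get_timestamp(ip, private_key=pkey, server=server))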
643 def get_server_ip(self, server):
644 """Get the server fixed or floating IP.
645
646 Based on the configuration we are running with, return an IP
647 address suitable for validating that a guest is up.
648 """
649 if CONF.validation.connect_method == 'floating':
650 # The tests calling this method don't have a floating IP
651 # and can't make use of the validation resources, so this
652 # method creates the floating IP here.
653 return self.create_floating_ip(server)['ip']
654 elif CONF.validation.connect_method == 'fixed':
655 # Determine the network name to look for based on config or creds
656 # provider network resources.
657 if CONF.validation.network_for_ssh:
658 addresses = server['addresses'][
659 CONF.validation.network_for_ssh]
660 else:
661 network = self.get_tenant_network()
662 addresses = (server['addresses'][network['name']]
663 if network else [])
664 for address in addresses:
665 if (address['version'] == CONF.validation.ip_version_for_ssh and # noqa
666 address['OS-EXT-IPS:type'] == 'fixed'):
667 return address['addr']
668 raise exceptions.ServerUnreachable(server_id=server['id'])
669 else:
670 raise lib_exc.InvalidConfiguration()
671
672 @classmethod
673 def get_host_for_server(cls, server_id):
674 server_details = cls.os_admin.servers_client.show_server(server_id)
675 return server_details['server']['OS-EXT-SRV-ATTR:host']
676
677 def _get_bdm(self, source_id, source_type, delete_on_termination=False):
678 bd_map_v2 = [{
679 'uuid': source_id,
680 'source_type': source_type,
681 'destination_type': 'volume',
682 'boot_index': 0,
683 'delete_on_termination': delete_on_termination}]
684 return {'block_device_mapping_v2': bd_map_v2}
685
686 def boot_instance_from_resource(self, source_id,
687 source_type,
688 keypair=None,
689 security_group=None,
690 delete_on_termination=False,
691 name=None):
692 create_kwargs = dict()
693 if keypair:
694 create_kwargs['key_name'] = keypair['name']
695 if security_group:
696 create_kwargs['security_groups'] = [
697 {'name': security_group['name']}]
698 create_kwargs.update(self._get_bdm(
699 source_id,
700 source_type,
701 delete_on_termination=delete_on_termination))
702 if name:
703 create_kwargs['name'] = name
704
705 return self.create_server(image_id='', **create_kwargs)
706
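# Illustrative sketch (the volume comes from create_volume_from_image below;
# the keypair and delete flag are assumptions): booting a server from a
# Cinder volume instead of a Glance image could look like:
#
#     volume = self.create_volume_from_image()
#     server = self.boot_instance_from_resource(
#         source_id=volume['id'],
#         source_type='volume',
#         keypair=self.create_keypair(),
#         delete_on_termination=True)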
707 def create_volume_from_image(self):
708 img_uuid = CONF.compute.image_ref
709 vol_name = data_utils.rand_name(
710 self.__class__.__name__ + '-volume-origin')
711 return self.create_volume(name=vol_name, imageRef=img_uuid)
712
713
714class NetworkScenarioTest(ScenarioTest):
715 """Base class for network scenario tests.
716
717 This class provides helpers for network scenario tests, using the
718 neutron API. Helpers from the ancestor class which use the nova
719 network API are overridden to use the neutron API.
720
721 This class also enforces using Neutron instead of nova-network.
722 Subclassed tests will be skipped if Neutron is not enabled.
723
724 """
725
726 credentials = ['primary', 'admin']
727
728 @classmethod
729 def skip_checks(cls):
730 super(NetworkScenarioTest, cls).skip_checks()
731 if not CONF.service_available.neutron:
732 raise cls.skipException('Neutron not available')
733
734 def _create_network(self, networks_client=None,
735 tenant_id=None,
736 namestart='network-smoke-',
737 port_security_enabled=True, **net_dict):
738 if not networks_client:
739 networks_client = self.networks_client
740 if not tenant_id:
741 tenant_id = networks_client.tenant_id
742 name = data_utils.rand_name(namestart)
743 network_kwargs = dict(name=name, tenant_id=tenant_id)
744 if net_dict:
745 network_kwargs.update(net_dict)
746 # The port_security_enabled attribute is only valid when the port
747 # security extension is enabled, so check the config before passing it.
748 if CONF.network_feature_enabled.port_security:
749 network_kwargs['port_security_enabled'] = port_security_enabled
750 result = networks_client.create_network(**network_kwargs)
751 network = result['network']
752
753 self.assertEqual(network['name'], name)
754 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
755 networks_client.delete_network,
756 network['id'])
757 return network
758
759 def create_subnet(self, network, subnets_client=None,
760 namestart='subnet-smoke', **kwargs):
761 """Create a subnet for the given network
762
763 within the cidr block configured for tenant networks.
764 """
765 if not subnets_client:
766 subnets_client = self.subnets_client
767
768 def cidr_in_use(cidr, tenant_id):
769 """Check cidr existence
770
771 :returns: True if a subnet with the given cidr already exists in
772 the tenant, False otherwise
773 """
774 cidr_in_use = self.os_admin.subnets_client.list_subnets(
775 tenant_id=tenant_id, cidr=cidr)['subnets']
776 return len(cidr_in_use) != 0
777
778 ip_version = kwargs.pop('ip_version', 4)
779
780 if ip_version == 6:
781 tenant_cidr = netaddr.IPNetwork(
782 CONF.network.project_network_v6_cidr)
783 num_bits = CONF.network.project_network_v6_mask_bits
784 else:
785 tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
786 num_bits = CONF.network.project_network_mask_bits
787
788 result = None
789 str_cidr = None
790 # Repeatedly attempt subnet creation with sequential cidr
791 # blocks until an unallocated block is found.
792 for subnet_cidr in tenant_cidr.subnet(num_bits):
793 str_cidr = str(subnet_cidr)
794 if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
795 continue
796
797 subnet = dict(
798 name=data_utils.rand_name(namestart),
799 network_id=network['id'],
800 tenant_id=network['tenant_id'],
801 cidr=str_cidr,
802 ip_version=ip_version,
803 **kwargs
804 )
805 try:
806 result = subnets_client.create_subnet(**subnet)
807 break
808 except lib_exc.Conflict as e:
809 is_overlapping_cidr = 'overlaps with another subnet' in str(e)
810 if not is_overlapping_cidr:
811 raise
812 self.assertIsNotNone(result, 'Unable to allocate tenant network')
813
814 subnet = result['subnet']
815 self.assertEqual(subnet['cidr'], str_cidr)
816
817 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
818 subnets_client.delete_subnet, subnet['id'])
819
820 return subnet
821
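# Illustrative sketch (an IPv6 subnet with assumed address-mode attributes):
# extra kwargs are forwarded to the subnet create call, so address modes can
# be set directly:
#
#     network = self._create_network()
#     subnet = self.create_subnet(network,
#                                 ip_version=6,
#                                 ipv6_ra_mode='slaac',
#                                 ipv6_address_mode='slaac')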
822 def _get_server_port_id_and_ip4(self, server, ip_addr=None):
823 if ip_addr:
824 ports = self.os_admin.ports_client.list_ports(
825 device_id=server['id'],
826 fixed_ips='ip_address=%s' % ip_addr)['ports']
827 else:
828 ports = self.os_admin.ports_client.list_ports(
829 device_id=server['id'])['ports']
830 # A port can have more than one IP address in some cases.
831 # If the network is dual-stack (IPv4 + IPv6), this port is associated
832 # with two subnets.
833 p_status = ['ACTIVE']
834 # NOTE(vsaienko) With Ironic, instances live on separate hardware
835 # servers. Neutron does not bind ports for Ironic instances, and as
836 # a result the port remains in the DOWN state.
837 # TODO(vsaienko) remove once bug: #1599836 is resolved.
838 if getattr(CONF.service_available, 'ironic', False):
839 p_status.append('DOWN')
840 port_map = [(p["id"], fxip["ip_address"])
841 for p in ports
842 for fxip in p["fixed_ips"]
843 if (netutils.is_valid_ipv4(fxip["ip_address"]) and
844 p['status'] in p_status)]
845 inactive = [p for p in ports if p['status'] != 'ACTIVE']
846 if inactive:
847 LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
848
849 self.assertNotEmpty(port_map,
850 "No IPv4 addresses found in: %s" % ports)
851 self.assertEqual(len(port_map), 1,
852 "Found multiple IPv4 addresses: %s. "
853 "Unable to determine which port to target."
854 % port_map)
855 return port_map[0]
856
857 def _get_network_by_name(self, network_name):
858 net = self.os_admin.networks_client.list_networks(
859 name=network_name)['networks']
860 self.assertNotEmpty(net,
861 "Unable to get network by name: %s" % network_name)
862 return net[0]
863
864 def create_floating_ip(self, thing, external_network_id=None,
865 port_id=None, client=None):
866 """Create a floating IP and associates to a resource/port on Neutron"""
867 if not external_network_id:
868 external_network_id = CONF.network.public_network_id
869 if not client:
870 client = self.floating_ips_client
871 if not port_id:
872 port_id, ip4 = self._get_server_port_id_and_ip4(thing)
873 else:
874 ip4 = None
875 result = client.create_floatingip(
876 floating_network_id=external_network_id,
877 port_id=port_id,
878 tenant_id=thing['tenant_id'],
879 fixed_ip_address=ip4
880 )
881 floating_ip = result['floatingip']
882 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
883 client.delete_floatingip,
884 floating_ip['id'])
885 return floating_ip
886
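# Illustrative sketch (assumes a reachable public network is configured as
# CONF.network.public_network_id and a keypair was created earlier):
# associating and then checking a floating IP typically looks like:
#
#     floating_ip = self.create_floating_ip(server)
#     self.check_floating_ip_status(floating_ip, 'ACTIVE')
#     self.check_vm_connectivity(floating_ip['floating_ip_address'],
#                                private_key=self.keypair['private_key'],
#                                server=server)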
887 def check_floating_ip_status(self, floating_ip, status):
888 """Verifies floatingip reaches the given status
889
890 :param dict floating_ip: floating IP dict to check status
891 :param status: target status
892 :raises: AssertionError if status doesn't match
893 """
894 floatingip_id = floating_ip['id']
895
896 def refresh():
897 result = (self.floating_ips_client.
898 show_floatingip(floatingip_id)['floatingip'])
899 return status == result['status']
900
901 if not test_utils.call_until_true(refresh,
902 CONF.network.build_timeout,
903 CONF.network.build_interval):
904 floating_ip = self.floating_ips_client.show_floatingip(
905 floatingip_id)['floatingip']
906 self.assertEqual(status, floating_ip['status'],
907 message="FloatingIP: {fp} is at status: {cst}. "
908 "failed to reach status: {st}"
909 .format(fp=floating_ip, cst=floating_ip['status'],
910 st=status))
911 LOG.info("FloatingIP: {fp} is at status: {st}"
912 .format(fp=floating_ip, st=status))
913
914 def _create_security_group(self, security_group_rules_client=None,
915 tenant_id=None,
916 namestart='secgroup-smoke',
917 security_groups_client=None):
918 if security_group_rules_client is None:
919 security_group_rules_client = self.security_group_rules_client
920 if security_groups_client is None:
921 security_groups_client = self.security_groups_client
922 if tenant_id is None:
923 tenant_id = security_groups_client.tenant_id
924 secgroup = self._create_empty_security_group(
925 namestart=namestart, client=security_groups_client,
926 tenant_id=tenant_id)
927
928 # Add rules to the security group
929 rules = self._create_loginable_secgroup_rule(
930 security_group_rules_client=security_group_rules_client,
931 secgroup=secgroup,
932 security_groups_client=security_groups_client)
933 for rule in rules:
934 self.assertEqual(tenant_id, rule['tenant_id'])
935 self.assertEqual(secgroup['id'], rule['security_group_id'])
936 return secgroup
937
938 def _create_empty_security_group(self, client=None, tenant_id=None,
939 namestart='secgroup-smoke'):
940 """Create a security group without rules.
941
942 Only Neutron's default egress rules will be created:
943 - IPv4 egress to any
944 - IPv6 egress to any
945
946 :param tenant_id: secgroup will be created in this tenant
947 :returns: the created security group
948 """
949 if client is None:
950 client = self.security_groups_client
951 if not tenant_id:
952 tenant_id = client.tenant_id
953 sg_name = data_utils.rand_name(namestart)
954 sg_desc = sg_name + " description"
955 sg_dict = dict(name=sg_name,
956 description=sg_desc)
957 sg_dict['tenant_id'] = tenant_id
958 result = client.create_security_group(**sg_dict)
959
960 secgroup = result['security_group']
961 self.assertEqual(secgroup['name'], sg_name)
962 self.assertEqual(tenant_id, secgroup['tenant_id'])
963 self.assertEqual(secgroup['description'], sg_desc)
964
965 self.addCleanup(test_utils.call_and_ignore_notfound_exc,
966 client.delete_security_group, secgroup['id'])
967 return secgroup
968
969 def _create_security_group_rule(self, secgroup=None,
970 sec_group_rules_client=None,
971 tenant_id=None,
972 security_groups_client=None, **kwargs):
973 """Create a rule from a dictionary of rule parameters.
974
975 Create a rule in a secgroup. If secgroup is not defined, the default
976 secgroup in tenant_id will be searched for.
977
978 :param secgroup: the security group.
979 :param tenant_id: if secgroup not passed -- the tenant in which to
980 search for default secgroup
981 :param kwargs: a dictionary containing rule parameters:
982 for example, to allow incoming ssh:
983 rule = {
984 direction: 'ingress',
985 protocol: 'tcp',
986 port_range_min: 22,
987 port_range_max: 22
988 }
989 """
990 if sec_group_rules_client is None:
991 sec_group_rules_client = self.security_group_rules_client
992 if security_groups_client is None:
993 security_groups_client = self.security_groups_client
994 if not tenant_id:
995 tenant_id = security_groups_client.tenant_id
996 if secgroup is None:
997 # Get default secgroup for tenant_id
998 default_secgroups = security_groups_client.list_security_groups(
999 name='default', tenant_id=tenant_id)['security_groups']
1000 msg = "No default security group for tenant %s." % (tenant_id)
1001 self.assertNotEmpty(default_secgroups, msg)
1002 secgroup = default_secgroups[0]
1003
1004 ruleset = dict(security_group_id=secgroup['id'],
1005 tenant_id=secgroup['tenant_id'])
1006 ruleset.update(kwargs)
1007
1008 sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
1009 sg_rule = sg_rule['security_group_rule']
1010
1011 self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
1012 self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
1013
1014 return sg_rule
1015
1016 def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
1017 secgroup=None,
1018 security_groups_client=None):
1019 """Create loginable security group rule
1020
1021 This function will create:
1022 1. egress and ingress tcp port 22 allow rules, in order to allow ssh
1023 access over ipv4.
1024 2. egress and ingress ipv6 icmp allow rules, in order to allow icmpv6.
1025 3. egress and ingress ipv4 icmp allow rules, in order to allow icmpv4.
1026 """
1027
1028 if security_group_rules_client is None:
1029 security_group_rules_client = self.security_group_rules_client
1030 if security_groups_client is None:
1031 security_groups_client = self.security_groups_client
1032 rules = []
1033 rulesets = [
1034 dict(
1035 # ssh
1036 protocol='tcp',
1037 port_range_min=22,
1038 port_range_max=22,
1039 ),
1040 dict(
1041 # ping
1042 protocol='icmp',
1043 ),
1044 dict(
1045 # ipv6-icmp for ping6
1046 protocol='icmp',
1047 ethertype='IPv6',
1048 )
1049 ]
1050 sec_group_rules_client = security_group_rules_client
1051 for ruleset in rulesets:
1052 for r_direction in ['ingress', 'egress']:
1053 ruleset['direction'] = r_direction
1054 try:
1055 sg_rule = self._create_security_group_rule(
1056 sec_group_rules_client=sec_group_rules_client,
1057 secgroup=secgroup,
1058 security_groups_client=security_groups_client,
1059 **ruleset)
1060 except lib_exc.Conflict as ex:
1061 # if the rule already exists - skip it and continue
1062 msg = 'Security group rule already exists'
1063 if msg not in ex._error_string:
1064 raise ex
1065 else:
1066 self.assertEqual(r_direction, sg_rule['direction'])
1067 rules.append(sg_rule)
1068
1069 return rules
1070
1071
1072class EncryptionScenarioTest(ScenarioTest):
1073 """Base class for encryption scenario tests"""
1074
1075 credentials = ['primary', 'admin']
1076
1077 @classmethod
1078 def setup_clients(cls):
1079 super(EncryptionScenarioTest, cls).setup_clients()
1080 cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
1081 cls.admin_encryption_types_client =\
1082 cls.os_admin.encryption_types_client_latest
1083
1084 def create_encryption_type(self, client=None, type_id=None, provider=None,
1085 key_size=None, cipher=None,
1086 control_location=None):
1087 if not client:
1088 client = self.admin_encryption_types_client
1089 if not type_id:
1090 volume_type = self.create_volume_type()
1091 type_id = volume_type['id']
1092 LOG.debug("Creating an encryption type for volume type: %s", type_id)
1093 client.create_encryption_type(
1094 type_id, provider=provider, key_size=key_size, cipher=cipher,
1095 control_location=control_location)
1096
1097 def create_encrypted_volume(self, encryption_provider, volume_type,
1098 key_size=256, cipher='aes-xts-plain64',
1099 control_location='front-end'):
1100 volume_type = self.create_volume_type(name=volume_type)
1101 self.create_encryption_type(type_id=volume_type['id'],
1102 provider=encryption_provider,
1103 key_size=key_size,
1104 cipher=cipher,
1105 control_location=control_location)
1106 return self.create_volume(volume_type=volume_type['name'])
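# Illustrative sketch (the provider value and type name are assumptions for
# a hypothetical deployment): an encryption scenario typically creates a
# LUKS-encrypted volume and attaches it to a server:
#
#     volume = self.create_encrypted_volume('luks', volume_type='luks-type')
#     server = self.create_server(wait_until='ACTIVE')
#     attached_volume = self.nova_volume_attach(server, volume)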