Switch to local copy of scenario test base class
This is in response to the mailing list post at:
http://lists.openstack.org/pipermail/openstack-dev/2017-February/112938.html
The tempest.scenario.manager module will be refactored,
so a local copy will be maintained in the
barbican_tempest_plugin repository until that refactor
is complete.
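
For illustration, a scenario test in this plugin now resolves the base
class from the local copy; the class and test names below are
hypothetical:

    from barbican_tempest_plugin.tests.scenario import manager

    class ExampleScenarioTest(manager.ScenarioTest):

        def test_boot_server(self):
            keypair = self.create_keypair()
            self.create_server(key_name=keypair['name'])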
Change-Id: I706e256fe4e224e44532ea0ce2c81495791ec97f
diff --git a/barbican_tempest_plugin/tests/scenario/barbican_manager.py b/barbican_tempest_plugin/tests/scenario/barbican_manager.py
index c279f04..0844d96 100644
--- a/barbican_tempest_plugin/tests/scenario/barbican_manager.py
+++ b/barbican_tempest_plugin/tests/scenario/barbican_manager.py
@@ -28,9 +28,9 @@
 from oslo_log import log as logging
 from tempest import config
-from tempest.scenario import manager as mgr
 
 from barbican_tempest_plugin import clients
+from barbican_tempest_plugin.tests.scenario import manager as mgr
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
diff --git a/barbican_tempest_plugin/tests/scenario/manager.py b/barbican_tempest_plugin/tests/scenario/manager.py
new file mode 100644
index 0000000..385c590
--- /dev/null
+++ b/barbican_tempest_plugin/tests/scenario/manager.py
@@ -0,0 +1,322 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+
+from tempest.common import compute
+from tempest.common import image as common_image
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+import tempest.test
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class ScenarioTest(tempest.test.BaseTestCase):
+    """Base class for scenario tests. Uses tempest's own clients."""
+
+ credentials = ['primary']
+
+ @classmethod
+ def setup_clients(cls):
+ super(ScenarioTest, cls).setup_clients()
+ # Clients (in alphabetical order)
+ cls.flavors_client = cls.manager.flavors_client
+ cls.compute_floating_ips_client = (
+ cls.manager.compute_floating_ips_client)
+ if CONF.service_available.glance:
+ # Check if glance v1 is available to determine which client to use.
+ if CONF.image_feature_enabled.api_v1:
+ cls.image_client = cls.manager.image_client
+ elif CONF.image_feature_enabled.api_v2:
+ cls.image_client = cls.manager.image_client_v2
+ else:
+ raise lib_exc.InvalidConfiguration(
+ 'Either api_v1 or api_v2 must be True in '
+ '[image-feature-enabled].')
+ # Compute image client
+ cls.compute_images_client = cls.manager.compute_images_client
+ cls.keypairs_client = cls.manager.keypairs_client
+ # Nova security groups client
+ cls.compute_security_groups_client = (
+ cls.manager.compute_security_groups_client)
+ cls.compute_security_group_rules_client = (
+ cls.manager.compute_security_group_rules_client)
+ cls.servers_client = cls.manager.servers_client
+ # Neutron network client
+ cls.networks_client = cls.manager.networks_client
+ cls.ports_client = cls.manager.ports_client
+ cls.routers_client = cls.manager.routers_client
+ cls.subnets_client = cls.manager.subnets_client
+ cls.floating_ips_client = cls.manager.floating_ips_client
+ cls.security_groups_client = cls.manager.security_groups_client
+ cls.security_group_rules_client = (
+ cls.manager.security_group_rules_client)
+
+ if CONF.volume_feature_enabled.api_v2:
+ cls.volumes_client = cls.manager.volumes_v2_client
+ cls.snapshots_client = cls.manager.snapshots_v2_client
+ else:
+ cls.volumes_client = cls.manager.volumes_client
+ cls.snapshots_client = cls.manager.snapshots_client
+
+ # ## Test functions library
+ #
+    # The create_[resource] functions return only the response body and
+    # discard the resp part, which is not used in scenario tests.
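+    #
+    # For example (illustrative only):
+    #
+    #     port = self._create_port(network_id=net_id)  # the 'port' body dict
+    #     keypair = self.create_keypair()              # the 'keypair' body dict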
+
+ def _create_port(self, network_id, client=None, namestart='port-quotatest',
+ **kwargs):
+ if not client:
+ client = self.ports_client
+ name = data_utils.rand_name(namestart)
+ result = client.create_port(
+ name=name,
+ network_id=network_id,
+ **kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
+ port = result['port']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_port, port['id'])
+ return port
+
+ def create_keypair(self, client=None):
+ if not client:
+ client = self.keypairs_client
+ name = data_utils.rand_name(self.__class__.__name__)
+        # We don't need to create a keypair from an existing public key
+        # in scenario tests.
+ body = client.create_keypair(name=name)
+ self.addCleanup(client.delete_keypair, name)
+ return body['keypair']
+
+ def create_server(self, name=None, image_id=None, flavor=None,
+ validatable=False, wait_until='ACTIVE',
+ clients=None, **kwargs):
+ """Wrapper utility that returns a test server.
+
+        This wrapper utility calls the common create test server and
+        returns a test server. The purpose of this wrapper is to
+        minimize the impact on the tests that already use this
+        function.
+ """
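+        # A typical call from a scenario test might look like this
+        # (values are illustrative):
+        #
+        #     server = self.create_server(
+        #         name='smoke-server',
+        #         key_name=keypair['name'],
+        #         wait_until='ACTIVE')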
+
+        # NOTE(jlanoux): As a first step, ssh checks in the scenario
+        # tests need to be run regardless of the run_validation and
+        # validatable parameters, at least until the ssh validation job
+        # becomes voting in CI. Test resource management and IP
+        # association are handled by the scenario tests themselves.
+        # Therefore, the validatable parameter is set to false in all
+        # those tests. This way create_server just returns a standard
+        # server and the scenario tests always perform ssh checks.
+
+ # Needed for the cross_tenant_traffic test:
+ if clients is None:
+ clients = self.manager
+
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__ + "-server")
+
+ vnic_type = CONF.network.port_vnic_type
+
+        # If vnic_type is configured, create a port for every network
+ if vnic_type:
+ ports = []
+
+ create_port_body = {'binding:vnic_type': vnic_type,
+ 'namestart': 'port-smoke'}
+ if kwargs:
+ # Convert security group names to security group ids
+ # to pass to create_port
+ if 'security_groups' in kwargs:
+ security_groups = \
+ clients.security_groups_client.list_security_groups(
+ ).get('security_groups')
+ sec_dict = dict([(s['name'], s['id'])
+ for s in security_groups])
+
+ sec_groups_names = [s['name'] for s in kwargs.pop(
+ 'security_groups')]
+ security_groups_ids = [sec_dict[s]
+ for s in sec_groups_names]
+
+ if security_groups_ids:
+ create_port_body[
+ 'security_groups'] = security_groups_ids
+ networks = kwargs.pop('networks', [])
+ else:
+ networks = []
+
+            # If no networks were passed to us, look up the project's
+            # private networks and create a port on each. This is the same
+            # behaviour we would expect when passing the call to the
+            # clients with no networks.
+ if not networks:
+ networks = clients.networks_client.list_networks(
+ **{'router:external': False, 'fields': 'id'})['networks']
+
+            # The key is net['uuid'] if the networks come from kwargs,
+            # and net['id'] if they come from
+            # clients.networks_client.list_networks
+ for net in networks:
+ net_id = net.get('uuid', net.get('id'))
+ if 'port' not in net:
+ port = self._create_port(network_id=net_id,
+ client=clients.ports_client,
+ **create_port_body)
+ ports.append({'port': port['id']})
+ else:
+ ports.append({'port': net['port']})
+ if ports:
+ kwargs['networks'] = ports
+ self.ports = ports
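+                # Each entry in kwargs['networks'] is now shaped like
+                # {'port': '<port-id>'}, which Nova accepts in place of
+                # a network uuid when booting the server.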
+
+ tenant_network = self.get_tenant_network()
+
+ body, servers = compute.create_test_server(
+ clients,
+ tenant_network=tenant_network,
+ wait_until=wait_until,
+ name=name, flavor=flavor,
+ image_id=image_id, **kwargs)
+
+ self.addCleanup(waiters.wait_for_server_termination,
+ clients.servers_client, body['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ clients.servers_client.delete_server, body['id'])
+ server = clients.servers_client.show_server(body['id'])['server']
+ return server
+
+ def create_volume(self, size=None, name=None, snapshot_id=None,
+ imageRef=None, volume_type=None):
+ if size is None:
+ size = CONF.volume.volume_size
+ if imageRef:
+ image = self.compute_images_client.show_image(imageRef)['image']
+ min_disk = image.get('minDisk')
+ size = max(size, min_disk)
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__ + "-volume")
+ kwargs = {'display_name': name,
+ 'snapshot_id': snapshot_id,
+ 'imageRef': imageRef,
+ 'volume_type': volume_type,
+ 'size': size}
+ volume = self.volumes_client.create_volume(**kwargs)['volume']
+
+ self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+ volume['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.volumes_client.delete_volume, volume['id'])
+
+ # NOTE(e0ne): Cinder API v2 uses name instead of display_name
+ if 'display_name' in volume:
+ self.assertEqual(name, volume['display_name'])
+ else:
+ self.assertEqual(name, volume['name'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume retrieved on creation has a non-up-to-date status.
+ # Retrieval after it becomes active ensures correct details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ return volume
+
+ def create_volume_type(self, client=None, name=None, backend_name=None):
+ if not client:
+ client = self.admin_volume_types_client
+ if not name:
+ class_name = self.__class__.__name__
+ name = data_utils.rand_name(class_name + '-volume-type')
+ randomized_name = data_utils.rand_name('scenario-type-' + name)
+
+ LOG.debug("Creating a volume type: %s on backend %s",
+ randomized_name, backend_name)
+ extra_specs = {}
+ if backend_name:
+ extra_specs = {"volume_backend_name": backend_name}
+
+ body = client.create_volume_type(name=randomized_name,
+ extra_specs=extra_specs)
+ volume_type = body['volume_type']
+ self.assertIn('id', volume_type)
+ self.addCleanup(client.delete_volume_type, volume_type['id'])
+ return volume_type
+
+ def _image_create(self, name, fmt, path,
+ disk_format=None, properties=None):
+ if properties is None:
+ properties = {}
+ name = data_utils.rand_name('%s-' % name)
+ params = {
+ 'name': name,
+ 'container_format': fmt,
+ 'disk_format': disk_format or fmt,
+ }
+ if CONF.image_feature_enabled.api_v1:
+ params['is_public'] = 'False'
+ params['properties'] = properties
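+            # image_meta_to_headers() converts the metadata into the
+            # 'x-image-meta-*' headers that the Glance v1 API expects.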
+ params = {'headers': common_image.image_meta_to_headers(**params)}
+ else:
+ params['visibility'] = 'private'
+ # Additional properties are flattened out in the v2 API.
+ params.update(properties)
+ body = self.image_client.create_image(**params)
+ image = body['image'] if 'image' in body else body
+ self.addCleanup(self.image_client.delete_image, image['id'])
+ self.assertEqual("queued", image['status'])
+ with open(path, 'rb') as image_file:
+ if CONF.image_feature_enabled.api_v1:
+ self.image_client.update_image(image['id'], data=image_file)
+ else:
+ self.image_client.store_image_file(image['id'], image_file)
+ return image['id']
+
+ def rebuild_server(self, server_id, image=None,
+ preserve_ephemeral=False, wait=True,
+ rebuild_kwargs=None):
+ if image is None:
+ image = CONF.compute.image_ref
+
+ rebuild_kwargs = rebuild_kwargs or {}
+
+ LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
+ server_id, image, preserve_ephemeral)
+ self.servers_client.rebuild_server(
+ server_id=server_id, image_ref=image,
+ preserve_ephemeral=preserve_ephemeral,
+ **rebuild_kwargs)
+ if wait:
+ waiters.wait_for_server_status(self.servers_client,
+ server_id, 'ACTIVE')
+
+ def create_floating_ip(self, thing, pool_name=None):
+        """Create a floating IP and associate it with a server on Nova."""
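+        # Typical usage from a scenario test (names are illustrative):
+        #     fip = self.create_floating_ip(server)
+        #     ip_address = fip['ip']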
+
+ if not pool_name:
+ pool_name = CONF.network.floating_network_name
+ floating_ip = (self.compute_floating_ips_client.
+ create_floating_ip(pool=pool_name)['floating_ip'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.compute_floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
+ self.compute_floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], thing['id'])
+ return floating_ip