Merge "Add project prefix to blacklist"
diff --git a/.zuul.yaml b/.zuul.yaml
index 5bec9f9..ecc9284 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -273,6 +273,21 @@
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
tempest_concurrency: 2
group-vars:
# NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
diff --git a/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
new file mode 100644
index 0000000..8475f50
--- /dev/null
+++ b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
@@ -0,0 +1,8 @@
+---
+other:
+ - |
+ New configuration options ``[compute]/certified_image_ref`` and
+ ``[compute]/certified_image_trusted_certs`` have been introduced. These
+ are required in order to run the ``ServerShowV263Test`` test and allow a
+ signed image with the required img_signature_* properties set along
+ with a list of trusted certificates to be used during the test.
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index e71e642..aaf7a5a 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -487,6 +487,9 @@
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 3a474e6..235049a 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -70,9 +70,7 @@
self.assertEqual(min_img_ram, image['min_ram'])
# Try to create server with flavor of insufficient ram size
- self.assertRaisesRegex(lib_exc.BadRequest,
- "Flavor's memory is too small for "
- "requested image",
- self.create_test_server,
- image_id=image['id'],
- flavor=flavor['id'])
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_test_server,
+ image_id=image['id'],
+ flavor=flavor['id'])
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index e8b1161..91ebbc0 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -186,10 +186,17 @@
min_microversion = '2.63'
max_microversion = 'latest'
+ @testtools.skipUnless(CONF.compute.certified_image_ref,
+ '``[compute]/certified_image_ref`` required to test '
+ 'image certificate validation.')
+ @testtools.skipUnless(CONF.compute.certified_image_trusted_certs,
+ '``[compute]/certified_image_trusted_certs`` '
+ 'required to test image certificate validation.')
@decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
def test_show_update_rebuild_list_server(self):
- trusted_certs = ['test-cert-1', 'test-cert-2']
+ trusted_certs = CONF.compute.certified_image_trusted_certs
server = self.create_test_server(
+ image_id=CONF.compute.certified_image_ref,
trusted_image_certificates=trusted_certs,
wait_until='ACTIVE')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 64fe29a..1bfd075 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -124,6 +124,10 @@
name = data_utils.rand_name(cls.__name__ + '-Volume')
kwargs['name'] = name
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.delete_volume, cls.volumes_client,
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index e6db2e9..f0d7264 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -94,6 +94,8 @@
class TempestCleanup(command.Command):
+ GOT_EXCEPTIONS = []
+
def take_action(self, parsed_args):
try:
self.init(parsed_args)
@@ -103,6 +105,8 @@
LOG.exception("Failure during cleanup")
traceback.print_exc()
raise
+ if self.GOT_EXCEPTIONS:
+ raise Exception(self.GOT_EXCEPTIONS)
def init(self, parsed_args):
cleanup_service.init_conf()
@@ -159,7 +163,8 @@
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
- 'is_save_state': is_save_state}
+ 'is_save_state': is_save_state,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
@@ -200,7 +205,8 @@
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
'is_save_state': False,
- 'project_id': project_id}
+ 'project_id': project_id,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.project_services:
svc = service(mgr, **kwargs)
svc.run()
@@ -300,7 +306,8 @@
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
- 'is_save_state': True}
+ 'is_save_state': True,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 104958a..ccceb34 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -22,6 +22,7 @@
from tempest.common import utils
from tempest.common.utils import net_info
from tempest import config
+from tempest.lib import exceptions
LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -127,12 +128,23 @@
pass
def run(self):
- if self.is_dry_run:
- self.dry_run()
- elif self.is_save_state:
- self.save_state()
- else:
- self.delete()
+ try:
+ if self.is_dry_run:
+ self.dry_run()
+ elif self.is_save_state:
+ self.save_state()
+ else:
+ self.delete()
+ except exceptions.NotImplemented as exc:
+ # Many OpenStack services use extensions logic to implement the
+ # features or resources. Tempest cleanup tries to clean up the test
+ # resources without having much logic of extensions checks etc.
+ # If any of the extensions is missing, the service will return a
+ # NotImplemented error.
+ msg = ("Got NotImplemented error in %s, full exception: %s" %
+ (str(self.__class__), str(exc)))
+ LOG.exception(msg)
+ self.got_exceptions.append(msg)
class SnapshotService(BaseService):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 4ac92d9..cd85ede 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -167,6 +167,9 @@
params = {'name': volume_name,
'imageRef': image_id,
'size': CONF.volume.volume_size}
+ if CONF.compute.compute_volume_common_az:
+ params.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = volumes_client.create_volume(**params)
try:
waiters.wait_for_volume_resource_status(volumes_client,
@@ -193,6 +196,9 @@
# to be specified.
image_id = ''
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
body = clients.servers_client.create_server(name=name, imageRef=image_id,
flavorRef=flavor,
**kwargs)
diff --git a/tempest/config.py b/tempest/config.py
index 82cbe09..4f0774f 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -271,6 +271,17 @@
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
+ cfg.StrOpt('certified_image_ref',
+ help="Valid image reference to be used in image certificate "
+ "validation tests when enabled. This image must also "
+ "have the required img_signature_* properties set. "
+ "Additional details available within the following Nova "
+ "documentation: https://docs.openstack.org/nova/latest/"
+ "user/certificate-validation.html"),
+ cfg.ListOpt('certified_image_trusted_certs',
+ help="A list of trusted certificates to be used when the "
+ "image certificate validation compute feature is "
+ "enabled."),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
@@ -739,7 +750,13 @@
help="Does the test environment support port security?"),
cfg.BoolOpt('floating_ips',
default=True,
- help='Does the test environment support floating_ips')
+ help='Does the test environment support floating_ips'),
+ cfg.StrOpt('qos_placement_physnet', default=None,
+ help='Name of the physnet for placement based minimum '
+ 'bandwidth allocation.'),
+ cfg.StrOpt('provider_net_base_segmentation_id', default='3000',
+ help='Base segmentation ID to create provider networks. '
+ 'This value will be increased in case of conflict.')
]
validation_group = cfg.OptGroup(name='validation',
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6ce5b78..1252f09 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -835,13 +835,15 @@
def _create_network(self, networks_client=None,
tenant_id=None,
namestart='network-smoke-',
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
if not networks_client:
networks_client = self.networks_client
if not tenant_id:
tenant_id = networks_client.tenant_id
name = data_utils.rand_name(namestart)
network_kwargs = dict(name=name, tenant_id=tenant_id)
+ if net_dict:
+ network_kwargs.update(net_dict)
# Neutron disables port security by default so we have to check the
# config before trying to create the network with port_security_enabled
if CONF.network_feature_enabled.port_security:
@@ -1266,7 +1268,7 @@
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
@@ -1274,6 +1276,11 @@
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
+ :param port_security_enabled: whether or not port_security is enabled
+ :param net_dict: a dict containing experimental network information in
+ a form like this: {'provider:network_type': 'vlan',
+ 'provider:physical_network': 'foo',
+ 'provider:segmentation_id': '42'}
:returns: network, subnet, router
"""
if CONF.network.shared_physical_network:
@@ -1293,7 +1300,8 @@
network = self._create_network(
networks_client=networks_client,
tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
+ port_security_enabled=port_security_enabled,
+ **net_dict)
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
new file mode 100644
index 0000000..e7085f6
--- /dev/null
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
+ credentials = ['primary', 'admin']
+ required_extensions = ['port-resource-request',
+ 'qos',
+ 'qos-bw-minimum-ingress']
+ # The feature QoS minimum bandwidth allocation in Placement API depends on
+ # Granular resource requests to GET /allocation_candidates and Support
+ # allocation candidates with nested resource providers features in
+ # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
+ # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
+ # means that the minimum placement microversion is 1.29
+ placement_min_microversion = '1.29'
+ placement_max_microversion = 'latest'
+
+ # Nova rejects to boot VM with port which has resource_request field, below
+ # microversion 2.72
+ compute_min_microversion = '2.72'
+ compute_max_microversion = 'latest'
+
+ INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+ INGRESS_DIRECTION = 'ingress'
+
+ SMALLEST_POSSIBLE_BW = 1
+ # For any realistic inventory value (that is inventory != MAX_INT) an
+ # allocation candidate request of MAX_INT is expected to be rejected, see:
+ # https://github.com/openstack/placement/blob/master/placement/
+ # db/constants.py#L16
+ PLACEMENT_MAX_INT = 0x7FFFFFFF
+
+ @classmethod
+ def setup_clients(cls):
+ super(MinBwAllocationPlacementTest, cls).setup_clients()
+ cls.placement_client = cls.os_admin.placement_client
+ cls.networks_client = cls.os_admin.networks_client
+ cls.subnets_client = cls.os_admin.subnets_client
+ cls.routers_client = cls.os_admin.routers_client
+ cls.qos_client = cls.os_admin.qos_client
+ cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+
+ @classmethod
+ def skip_checks(cls):
+ super(MinBwAllocationPlacementTest, cls).skip_checks()
+ if not CONF.network_feature_enabled.qos_placement_physnet:
+ msg = "Skipped as no physnet is available in config for " \
+ "placement based QoS allocation."
+ raise cls.skipException(msg)
+
+ def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
+ policy = self.qos_client.create_qos_policy(
+ name=data_utils.rand_name(name_prefix),
+ shared=True)['policy']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.qos_client.delete_qos_policy, policy['id'])
+ rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+ policy['id'],
+ **{
+ 'min_kbps': min_kbps,
+ 'direction': self.INGRESS_DIRECTION
+ })['minimum_bandwidth_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
+ rule['id'])
+
+ return policy
+
+ def _create_qos_policies(self):
+ self.qos_policy_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_valid',
+ min_kbps=self.SMALLEST_POSSIBLE_BW)
+ self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_not_valid',
+ min_kbps=self.PLACEMENT_MAX_INT)
+
+ def _create_network_and_qos_policies(self):
+ physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+ base_segm = \
+ CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+ self.prov_network, _, _ = self.create_networks(
+ networks_client=self.networks_client,
+ routers_client=self.routers_client,
+ subnets_client=self.subnets_client,
+ **{
+ 'shared': True,
+ 'provider:network_type': 'vlan',
+ 'provider:physical_network': physnet_name,
+ 'provider:segmentation_id': base_segm
+ })
+
+ self._create_qos_policies()
+
+ def _check_if_allocation_is_possible(self):
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.SMALLEST_POSSIBLE_BW))
+ if len(alloc_candidates['provider_summaries']) == 0:
+ self.fail('No allocation candidates are available for %s:%s' %
+ (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+
+ # Just to be sure check with impossible high (placement max_int),
+ # allocation
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.PLACEMENT_MAX_INT))
+ if len(alloc_candidates['provider_summaries']) != 0:
+ self.fail('For %s:%s there should be no available candidate!' %
+ (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+
+ @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_basic(self):
+ """Basic scenario with QoS min bw allocation in placement.
+
+ Steps:
+ * Create prerequisites:
+ ** VLAN type provider network with subnet.
+ ** valid QoS policy with minimum bandwidth rule with min_kbps=1
+ (This is a simplification to skip the checks in placement for
+ detecting the resource provider tree and inventories, as if
+ bandwidth resource is available 1 kbs will be available).
+ ** invalid QoS policy with minimum bandwidth rule with
+ min_kbps=max integer from placement (this is a simplification again
+ to avoid detection of RP trees and inventories, as placement will
+ reject such big allocation).
+ * Create port with valid QoS policy, and boot VM with that, it should
+ pass.
+ * Create port with invalid QoS policy, and try to boot VM with that,
+ it should fail.
+ """
+
+ self._check_if_allocation_is_possible()
+
+ self._create_network_and_qos_policies()
+
+ valid_port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': valid_port['id']}])
+ allocations = self.placement_client.list_allocations(server1['id'])
+
+ self.assertGreater(len(allocations['allocations']), 0)
+ bw_resource_in_alloc = False
+ for rp, resources in allocations['allocations'].items():
+ if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+ bw_resource_in_alloc = True
+ self.assertTrue(bw_resource_in_alloc)
+
+ # boot another vm with max int bandwidth
+ not_valid_port = self.create_port(
+ self.prov_network['id'],
+ qos_policy_id=self.qos_policy_not_valid['id'])
+ server2 = self.create_server(
+ wait_until=None,
+ networks=[{'port': not_valid_port['id']}])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server2['id'],
+ status='ERROR', ready_wait=False, raise_on_error=False)
+ allocations = self.placement_client.list_allocations(server2['id'])
+
+ self.assertEqual(0, len(allocations['allocations']))
+ server2 = self.servers_client.show_server(server2['id'])
+ self.assertIn('fault', server2['server'])
+ self.assertIn('No valid host', server2['server']['fault']['message'])
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
index b47da0b..1618df9 100644
--- a/tempest/tests/cmd/test_cleanup.py
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import mock
+
from tempest.cmd import cleanup
from tempest.tests import base
@@ -24,3 +26,17 @@
test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
# test if the file is loaded without any issues/exceptions
c._load_json(test_saved_json)
+
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup.init')
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup._cleanup')
+ def test_take_action_got_exception(self, mock_cleanup, mock_init):
+ c = cleanup.TempestCleanup(None, None, 'test')
+ c.GOT_EXCEPTIONS.append('exception')
+ mock_cleanup.return_value = True
+ mock_init.return_value = True
+ try:
+ c.take_action(mock.Mock())
+ except Exception as exc:
+ self.assertEqual(str(exc), '[\'exception\']')
+ return
+ assert False
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 3262b1c..de0dbec 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -19,6 +19,7 @@
from tempest import clients
from tempest.cmd import cleanup_service
from tempest import config
+from tempest.lib import exceptions
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_credentials
@@ -27,13 +28,24 @@
class TestBaseService(base.TestCase):
+ class TestException(cleanup_service.BaseService):
+ def delete(self):
+ raise exceptions.NotImplemented
+
+ def dry_run(self):
+ raise exceptions.NotImplemented
+
+ def save_state(self):
+ raise exceptions.NotImplemented
+
def test_base_service_init(self):
kwargs = {'data': {'data': 'test'},
'is_dry_run': False,
'saved_state_json': {'saved': 'data'},
'is_preserve': False,
'is_save_state': True,
- 'tenant_id': 'project_id'}
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
base = cleanup_service.BaseService(kwargs)
self.assertEqual(base.data, kwargs['data'])
self.assertFalse(base.is_dry_run)
@@ -41,6 +53,28 @@
self.assertFalse(base.is_preserve)
self.assertTrue(base.is_save_state)
self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
+ self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
+
+ def test_not_implemented_ex(self):
+ kwargs = {'data': {'data': 'test'},
+ 'is_dry_run': False,
+ 'saved_state_json': {'saved': 'data'},
+ 'is_preserve': False,
+ 'is_save_state': False,
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
+ base = self.TestException(kwargs)
+ # delete
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 1)
+ # save_state
+ base.is_save_state = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 2)
+ # dry_run
+ base.is_dry_run = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 3)
class MockFunctionsBase(base.TestCase):