Merge "Use assertIn to check for subnet membership"
diff --git a/HACKING.rst b/HACKING.rst
index 7363e7f..607682b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -11,7 +11,7 @@
- [T102] Cannot import OpenStack python clients in tempest/api &
tempest/scenario tests
- [T104] Scenario tests require a services decorator
-- [T105] Unit tests cannot use setUpClass
+- [T105] Tests cannot use setUpClass/tearDownClass
- [T106] vim configuration should not be kept in source files.
- [N322] Method's default argument shouldn't be mutable
@@ -108,6 +108,46 @@
in tempest.api.compute would require a service tag for those services, however
they do not need to be tagged as compute.
+Test fixtures and resources
+---------------------------
+Test level resources should be cleaned up after the test execution. Clean-up
+is best scheduled using `addCleanup`, which ensures that the resource cleanup
+code is always invoked, in reverse order with respect to the creation
+order.
+
+Test class level resources should be defined in the `resource_setup` method of
+the test class, except for any credential obtained from the credentials
+provider, which should be set up in the `setup_credentials` method.
+
+The test base class `BaseTestCase` defines the Tempest framework for class
+level fixtures. `setUpClass` and `tearDownClass` are defined here and cannot
+be overridden by subclasses (enforced via hacking rule T105).
+
+Set-up is split into a series of steps (setup stages), which can be
+overridden by test classes. Set-up stages are:
+- `skip_checks`
+- `setup_credentials`
+- `setup_clients`
+- `resource_setup`
+
+Tear-down is also split into a series of steps (teardown stages), which are
+stacked for execution only if the corresponding setup stage has been
+reached during the setup phase. Tear-down stages are:
+- `clear_isolated_creds` (defined in the base test class)
+- `resource_cleanup`
+
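For illustration only (the base class, client calls and resource names below
are hypothetical, not part of this change), a test class combining class
level and test level resources could look roughly like this::

    class ExampleNetworkTest(base.BaseNetworkTest):

        @classmethod
        def resource_setup(cls):
            super(ExampleNetworkTest, cls).resource_setup()
            # class level resource, shared by all tests in the class and
            # released in resource_cleanup
            _, body = cls.client.create_network(name='example-net')
            cls.shared_network = body['network']

        @classmethod
        def resource_cleanup(cls):
            # must tolerate partially created resources
            if getattr(cls, 'shared_network', None):
                cls.client.delete_network(cls.shared_network['id'])
            super(ExampleNetworkTest, cls).resource_cleanup()

        def test_create_port(self):
            # test level resource: clean-up is scheduled right after creation
            # so it runs even if the assertion below fails; addCleanup
            # callbacks run in reverse order of registration
            _, body = self.client.create_port(
                network_id=self.shared_network['id'])
            port = body['port']
            self.addCleanup(self.client.delete_port, port['id'])
            self.assertEqual(self.shared_network['id'], port['network_id'])
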
+Skipping Tests
+--------------
+Skipping tests should be based on configuration only. If that is not possible,
+it is likely that either a configuration flag is missing, or the test should
+fail rather than be skipped.
+Using discovery for skipping tests is generally discouraged.
+
+When a test requires a certain "feature" in the target cloud and that feature
+is missing, the test should fail: either the test configuration is invalid,
+or the cloud is broken and the expected "feature" is not there even though
+the cloud was configured to provide it.
+
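A minimal sketch of a configuration-driven skip check (the class name is
illustrative; `CONF.service_available.neutron` is one of the existing
configuration flags)::

    @classmethod
    def skip_checks(cls):
        super(ExampleNetworkTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            # the decision is taken from configuration alone, no API calls
            raise cls.skipException("Neutron support is required")
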
Negative Tests
--------------
Newly added negative tests should use the negative test framework. First step
diff --git a/tempest/api/baremetal/admin/test_ports.py b/tempest/api/baremetal/admin/test_ports.py
index b3f9b7f..3392ab9 100644
--- a/tempest/api/baremetal/admin/test_ports.py
+++ b/tempest/api/baremetal/admin/test_ports.py
@@ -57,11 +57,13 @@
_, body = self.client.show_port(uuid)
self._assertExpected(port, body)
+ @test.skip_because(bug='1398350')
@test.attr(type='smoke')
def test_create_port_with_extra(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
+ extra = {'str': 'value', 'int': 123, 'float': 0.123,
+ 'bool': True, 'list': [1, 2, 3], 'dict': {'foo': 'bar'}}
_, port = self.create_port(node_id=node_id, address=address,
extra=extra)
@@ -224,6 +226,7 @@
_, body = self.client.show_port(port['uuid'])
self.assertEqual(extra, body['extra'])
+ @test.skip_because(bug='1398350')
@test.attr(type='smoke')
def test_update_port_mixed_ops(self):
node_id = self.node['uuid']
@@ -234,7 +237,7 @@
extra=extra)
new_address = data_utils.rand_mac_address()
- new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+ new_extra = {'key1': 0.123, 'key3': {'cat': 'meow'}}
patch = [{'path': '/address',
'op': 'replace',
diff --git a/tempest/api/baremetal/admin/test_ports_negative.py b/tempest/api/baremetal/admin/test_ports_negative.py
index ead3799..8080eb6 100644
--- a/tempest/api/baremetal/admin/test_ports_negative.py
+++ b/tempest/api/baremetal/admin/test_ports_negative.py
@@ -34,15 +34,6 @@
self.create_port, node_id=node_id, address=address)
@test.attr(type=['negative', 'smoke'])
- def test_create_port_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 0.123}
- self.assertRaises(exc.BadRequest,
- self.create_port, node_id=node_id,
- address=address, extra=extra)
-
- @test.attr(type=['negative', 'smoke'])
def test_create_port_nonexsistent_node_id(self):
node_id = str(data_utils.rand_uuid())
address = data_utils.rand_mac_address()
@@ -160,31 +151,6 @@
'value': new_address}])
@test.attr(type=['negative', 'smoke'])
- def test_update_port_add_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/extra/key', ' op': 'add',
- 'value': 0.123}])
-
- @test.attr(type=['negative', 'smoke'])
- def test_update_port_add_whole_malformed_extra(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
- [{'path': '/extra',
- 'op': 'add',
- 'value': [1, 2, 3, 4, 'a']}])
-
- @test.attr(type=['negative', 'smoke'])
def test_update_port_add_nonexistent_property(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
@@ -257,37 +223,6 @@
self.client.update_port, port_id, patch)
@test.attr(type=['negative', 'smoke'])
- def test_update_port_replace_extra_item_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
- extra = {'key': 'value'}
-
- _, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- port_id = port['uuid']
-
- patch = [{'path': '/extra/key',
- 'op': 'replace',
- 'value': 0.123}]
- self.assertRaises(exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative', 'smoke'])
- def test_update_port_replace_whole_extra_with_malformed(self):
- node_id = self.node['uuid']
- address = data_utils.rand_mac_address()
-
- _, port = self.create_port(node_id=node_id, address=address)
- port_id = port['uuid']
-
- patch = [{'path': '/extra',
- 'op': 'replace',
- 'value': [1, 2, 3, 4, 'a']}]
-
- self.assertRaises(exc.BadRequest,
- self.client.update_port, port_id, patch)
-
- @test.attr(type=['negative', 'smoke'])
def test_update_port_replace_nonexistent_property(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 25e14a8..46e7251 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -38,11 +38,11 @@
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
- self.assertIn(ext, map(lambda x: x['name'], extensions))
+ self.assertIn(ext, map(lambda x: x['alias'], extensions))
else:
raise self.skipException('There are not any extensions configured')
# Log extensions list
- extension_list = map(lambda x: x['name'], extensions)
+ extension_list = map(lambda x: x['alias'], extensions)
LOG.debug("Nova extensions: %s" % ','.join(extension_list))
@test.requires_ext(extension='os-consoles', service='compute')
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 12b8887..0c36820 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -38,6 +38,8 @@
Update firewall policy
Insert firewall rule to policy
Remove firewall rule from policy
+ Insert firewall rule after/before rule in policy
+ Update firewall policy audited attribute
Delete firewall policy
Show firewall policy
List firewall
@@ -222,14 +224,14 @@
self.client.delete_firewall(firewall_id)
@test.attr(type='smoke')
- def test_insert_remove_firewall_rule_from_policy(self):
+ def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
# Create firewall rule
resp, body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
- fw_rule_id = body['firewall_rule']['id']
- self.addCleanup(self._try_delete_rule, fw_rule_id)
+ fw_rule_id1 = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id1)
# Create firewall policy
_, body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
@@ -238,19 +240,76 @@
# Insert rule to firewall policy
self.client.insert_firewall_rule_in_policy(
- fw_policy_id, fw_rule_id, '', '')
+ fw_policy_id, fw_rule_id1, '', '')
# Verify insertion of rule in policy
- self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
+ # Create another firewall rule
+ _, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="icmp")
+ fw_rule_id2 = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id2)
+
+ # Insert rule to firewall policy after the first rule
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id2, fw_rule_id1, '')
+
+ # Verify the position of the rule after insertion
+ _, fw_rule = self.client.show_firewall_rule(
+ fw_rule_id2)
+
+ self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
- fw_policy_id, fw_rule_id)
+ fw_policy_id, fw_rule_id2)
+ # Insert rule to firewall policy before the first rule
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id2, '', fw_rule_id1)
+ # Verify the position of the rule after insertion
+ _, fw_rule = self.client.show_firewall_rule(
+ fw_rule_id2)
+ self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id2)
+ # Verify removal of rule from firewall policy
+ self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
+
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id1)
# Verify removal of rule from firewall policy
- self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
def _get_list_fw_rule_ids(self, fw_policy_id):
_, fw_policy = self.client.show_firewall_policy(
fw_policy_id)
return [ruleid for ruleid in fw_policy['firewall_policy']
['firewall_rules']]
+
+ def test_update_firewall_policy_audited_attribute(self):
+ # Create firewall rule
+ _, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="icmp")
+ fw_rule_id = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id)
+ # Create firewall policy
+ _, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name('fw-policy'))
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+ self.assertFalse(body['firewall_policy']['audited'])
+ # Update firewall policy audited attribute to True
+ self.client.update_firewall_policy(fw_policy_id,
+ audited=True)
+ # Insert Firewall rule to firewall policy
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id, '', '')
+ _, body = self.client.show_firewall_policy(
+ fw_policy_id)
+ self.assertFalse(body['firewall_policy']['audited'])
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index a03e587..9cd5e2e 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -66,6 +66,23 @@
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
+ def test_create_bulk_port(self):
+ network1 = self.network
+ name = data_utils.rand_name('network-')
+ network2 = self.create_network(network_name=name)
+ network_list = [network1['id'], network2['id']]
+ port_list = [{'network_id': net_id} for net_id in network_list]
+ _, body = self.client.create_bulk_port(port_list)
+ created_ports = body['ports']
+ port1 = created_ports[0]
+ port2 = created_ports[1]
+ self.addCleanup(self._delete_port, port1['id'])
+ self.addCleanup(self._delete_port, port2['id'])
+ self.assertEqual(port1['network_id'], network1['id'])
+ self.assertEqual(port2['network_id'], network2['id'])
+ self.assertTrue(port1['admin_state_up'])
+ self.assertTrue(port2['admin_state_up'])
+
@test.attr(type='smoke')
def test_show_port(self):
# Verify the details of port
diff --git a/tempest/api/network/test_service_type_management.py b/tempest/api/network/test_service_type_management.py
index 6695f47..7f8b479 100644
--- a/tempest/api/network/test_service_type_management.py
+++ b/tempest/api/network/test_service_type_management.py
@@ -24,6 +24,7 @@
msg = "Neutron Service Type Management not enabled."
raise cls.skipException(msg)
+ @test.skip_because(bug="1400370")
@test.attr(type='smoke')
def test_service_provider_list(self):
_, body = self.client.list_service_providers()
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index 66ea9b7..0f6c2d6 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -39,7 +39,7 @@
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
- self.assertIn(ext, map(lambda x: x['name'], extensions))
+ self.assertIn(ext, map(lambda x: x['alias'], extensions))
else:
raise self.skipException('There are not any extensions configured')
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index f426e4d..6f74c3e 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -165,21 +165,19 @@
def verify_extensions(os, service, results):
extensions_client = get_extension_client(os, service)
__, resp = extensions_client.list_extensions()
+ # For Nova, Cinder and Neutron we use the alias name rather than the
+ # 'name' field because the alias is considered to be the canonical
+ # name.
if isinstance(resp, dict):
- # For both Nova and Neutron we use the alias name rather than the
- # 'name' field because the alias is considered to be the canonical
- # name.
- if service in ['nova', 'nova_v3', 'neutron']:
- extensions = map(lambda x: x['alias'], resp['extensions'])
- elif service == 'swift':
+ if service == 'swift':
# Remove Swift general information from extensions list
resp.pop('swift')
extensions = resp.keys()
else:
- extensions = map(lambda x: x['name'], resp['extensions'])
+ extensions = map(lambda x: x['alias'], resp['extensions'])
else:
- extensions = map(lambda x: x['name'], resp)
+ extensions = map(lambda x: x['alias'], resp)
if not results.get(service):
results[service] = {}
extensions_opt = get_enabled_extensions(service)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 4c3905c..1df8896 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -33,25 +33,11 @@
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2
-TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
# All the successful HTTP status codes from RFC 7231 & 4918
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207)
-# convert a structure into a string safely
-def safe_body(body, maxlen=4096):
- try:
- text = six.text_type(body)
- except UnicodeDecodeError:
- # if this isn't actually text, return marker that
- return "<BinaryData: removed>"
- if len(text) > maxlen:
- return text[:maxlen]
- else:
- return text
-
-
class ResponseBody(dict):
"""Class that wraps an http response and dict body into a single value.
@@ -296,6 +282,18 @@
return resp[i]
return ""
+ def _safe_body(self, body, maxlen=4096):
+ # convert a structure into a string safely
+ try:
+ text = six.text_type(body)
+ except UnicodeDecodeError:
+ # if this isn't actually text, return a marker that it was removed
+ return "<BinaryData: removed>"
+ if len(text) > maxlen:
+ return text[:maxlen]
+ else:
+ return text
+
def _log_request_start(self, method, req_url, req_headers=None,
req_body=None):
if req_headers is None:
@@ -326,9 +324,9 @@
req_url,
secs,
str(req_headers),
- safe_body(req_body),
+ self._safe_body(req_body),
str(resp),
- safe_body(resp_body)),
+ self._safe_body(resp_body)),
extra=extra)
def _log_request(self, method, req_url, resp,
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 52568cb..93f02c9 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -111,22 +111,24 @@
while image['status'] != status:
time.sleep(client.build_interval)
resp, image = client.get_image(image_id)
- if image['status'] == 'ERROR':
+ status_curr = image['status']
+ if status_curr == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
# check the status again to avoid a false negative where we hit
# the timeout at the same time that the image reached the expected
# status
- if image['status'] == status:
+ if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
- message = ('Image %(image_id)s failed to reach %(status)s '
- 'status within the required time (%(timeout)s s).' %
+ message = ('Image %(image_id)s failed to reach %(status)s state '
+ '(current state %(status_curr)s) '
+ 'within the required time (%(timeout)s s).' %
{'image_id': image_id,
'status': status,
+ 'status_curr': status_curr,
'timeout': client.build_timeout})
- message += ' Current status: %s.' % image['status']
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
@@ -144,7 +146,8 @@
while node[attr] != status:
time.sleep(client.build_interval)
_, node = client.show_node(node_id)
- if node[attr] == status:
+ status_curr = node[attr]
+ if status_curr == status:
return
if int(time.time()) - start >= client.build_timeout:
@@ -154,7 +157,7 @@
'attr': attr,
'status': status,
'timeout': client.build_timeout})
- message += ' Current state of %s: %s.' % (attr, node[attr])
+ message += ' Current state of %s: %s.' % (attr, status_curr)
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index cc31fad..213d5de 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -75,7 +75,7 @@
message = 'Unauthorized'
-class InvalidServiceTag(RestClientException):
+class InvalidServiceTag(TempestException):
message = "Invalid service tag"
@@ -140,15 +140,15 @@
message = "Endpoint not found"
-class RateLimitExceeded(TempestException):
+class RateLimitExceeded(RestClientException):
message = "Rate limit exceeded"
-class OverLimit(TempestException):
+class OverLimit(RestClientException):
message = "Quota exceeded"
-class ServerFault(TempestException):
+class ServerFault(RestClientException):
message = "Got server fault"
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 522aa43..e46ec6d 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -297,7 +297,19 @@
return secgroup
- def get_remote_client(self, server_or_ip, username=None, private_key=None):
+ def get_remote_client(self, server_or_ip, username=None, private_key=None,
+ log_console_of_servers=None):
+ """Get a SSH client to a remote server
+
+ @param server_or_ip a server object as returned by Tempest compute
+ client or an IP address to connect to
+ @param username name of the Linux account on the remote server
+ @param private_key the SSH private key to use
+ @param log_console_of_servers a list of server objects. Each server
+ in the list will have its console printed in the logs in case the
+ SSH connection cannot be established
+ @return a RemoteClient object
+ """
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
@@ -312,9 +324,13 @@
pkey=private_key)
try:
linux_client.validate_authentication()
- except exceptions.SSHTimeout:
- LOG.exception('ssh connection to %s failed' % ip)
+ except Exception:
+ LOG.exception('Initializing SSH connection to %s failed' % ip)
debug.log_net_debug()
+ # If we don't explicitly set for which servers we want to
+ # log the console output, then all the servers will be logged.
+ # See the definition of _log_console_output()
+ self._log_console_output(log_console_of_servers)
raise
return linux_client
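For reference, a sketch of how a caller would use the new
`log_console_of_servers` parameter (the `floating_ip`, `server` and
`self.keypair` variables are assumed to exist in the calling test)::

    ssh_client = self.get_remote_client(
        server_or_ip=floating_ip['ip'],
        username=CONF.compute.image_ssh_user,
        private_key=self.keypair['private_key'],
        log_console_of_servers=[server])
    # on SSH failure the console of `server` is logged before re-raising
    ssh_client.exec_command('uname -a')
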
@@ -989,6 +1005,10 @@
self.addCleanup(self.delete_wrapper, router.delete)
return router
+ def _update_router_admin_state(self, router, admin_state_up):
+ router.update(admin_state_up=admin_state_up)
+ self.assertEqual(admin_state_up, router.admin_state_up)
+
def create_networks(self, client=None, tenant_id=None):
"""Create a network with a subnet connected to a router.
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index ddfabe4..f09f00c 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -119,14 +119,8 @@
if self.keypair:
# Check that the user can authenticate with the generated
# keypair
- try:
- linux_client = self.get_remote_client(
- server_ip, username='ec2-user')
- linux_client.validate_authentication()
- except (exceptions.ServerUnreachable,
- exceptions.SSHTimeout) as e:
- self._log_console_output(servers=[server])
- raise e
+ self.get_remote_client(server_ip, username='ec2-user',
+ log_console_of_servers=[server])
@test.attr(type='slow')
@test.skip_because(bug='1374175')
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 59af6b3..16a65c9 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common import custom_matchers
-from tempest.common import debug
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -89,17 +88,6 @@
self.servers_client.reboot(self.server['id'], 'SOFT')
self._wait_for_server_status('ACTIVE')
- def ssh_to_server(self):
- try:
- self.linux_client = self.get_remote_client(self.floating_ip['ip'])
- except Exception as e:
- LOG.exception('ssh to server failed')
- self._log_console_output()
- # network debug is called as part of ssh init
- if not isinstance(e, test.exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
def check_partitions(self):
# NOTE(andreaf) The device name may be different on different guest OS
partitions = self.linux_client.get_partitions()
@@ -147,7 +135,9 @@
self.floating_ip = self.create_floating_ip(self.server)
self.create_and_add_security_group()
- self.ssh_to_server()
+
+ self.linux_client = self.get_remote_client(self.floating_ip['ip'])
self.nova_reboot()
- self.ssh_to_server()
+
+ self.linux_client = self.get_remote_client(self.floating_ip['ip'])
self.check_partitions()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 9618124..d24eb01 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -170,8 +170,9 @@
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
- def check_public_network_connectivity(self, should_connect=True,
- msg=None):
+ def check_public_network_connectivity(
+ self, should_connect=True, msg=None,
+ should_check_floating_ip_status=True):
"""Verifies connectivty to a VM via public network and floating IP,
and verifies floating IP has resource status is correct.
@@ -180,6 +181,8 @@
:param msg: Failure message to add to Error message. Should describe
the place in the test scenario where the method was called,
to indicate the context of the failure
+ :param should_check_floating_ip_status: bool. Whether the status of the
+ floating IP should be checked or not
"""
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
@@ -193,7 +196,8 @@
super(TestNetworkBasicOps, self).check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
- self.check_floating_ip_status(floating_ip, floatingip_status)
+ if should_check_floating_ip_status:
+ self.check_floating_ip_status(floating_ip, floatingip_status)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
@@ -393,3 +397,34 @@
self._create_new_network()
self._hotplug_server()
self._check_network_internal_connectivity(network=self.new_net)
+
+ @testtools.skipIf(CONF.baremetal.driver_enabled,
+ 'Router state cannot be altered on a shared baremetal '
+ 'network')
+ @test.attr(type='smoke')
+ @test.services('compute', 'network')
+ def test_update_router_admin_state(self):
+ """
+ 1. Check public connectivity before updating
+ admin_state_up attribute of router to False
+ 2. Check public connectivity after updating
+ admin_state_up attribute of router to False
+ 3. Check public connectivity after updating
+ admin_state_up attribute of router to True
+ """
+ self._setup_network_and_servers()
+ self.check_public_network_connectivity(
+ should_connect=True, msg="before updating "
+ "admin_state_up of router to False")
+ self._update_router_admin_state(self.router, False)
+ # TODO(alokmaurya): Remove should_check_floating_ip_status=False check
+ # once bug 1396310 is fixed
+
+ self.check_public_network_connectivity(
+ should_connect=False, msg="after updating "
+ "admin_state_up of router to False",
+ should_check_floating_ip_status=False)
+ self._update_router_admin_state(self.router, True)
+ self.check_public_network_connectivity(
+ should_connect=True, msg="after updating "
+ "admin_state_up of router to True")
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 7e512a9..23743c5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -88,15 +88,10 @@
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], self.instance['id'])
# Check ssh
- try:
- self.get_remote_client(
- server_or_ip=floating_ip['ip'],
- username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair['private_key'])
- except Exception:
- LOG.exception('ssh to server failed')
- self._log_console_output()
- raise
+ self.get_remote_client(
+ server_or_ip=floating_ip['ip'],
+ username=self.image_utils.ssh_user(self.image_ref),
+ private_key=self.keypair['private_key'])
@test.services('compute', 'network')
def test_server_basicops(self):
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 159585b..5cb7c99 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -47,21 +47,13 @@
def _add_keypair(self):
self.keypair = self.create_keypair()
- def _ssh_to_server(self, server_or_ip):
- try:
- return self.get_remote_client(server_or_ip)
- except Exception:
- LOG.exception('Initializing SSH connection failed')
- self._log_console_output()
- raise
-
def _write_timestamp(self, server_or_ip):
- ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client = self.get_remote_client(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
def _check_timestamp(self, server_or_ip):
- ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client = self.get_remote_client(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index dd115e7..c584a6e 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -110,14 +110,8 @@
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
- try:
- return self.get_remote_client(
- ip,
- private_key=keypair['private_key'])
- except Exception:
- LOG.exception('ssh to server failed')
- self._log_console_output(servers=[server])
- raise
+ return self.get_remote_client(ip, private_key=keypair['private_key'],
+ log_console_of_servers=[server])
def _get_content(self, ssh_client):
return ssh_client.exec_command('cat /tmp/text')
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 83c253a..620ed68 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -79,9 +79,10 @@
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
- message = ('Interface %s failed to reach %s status within '
- 'the required time (%s s).' %
- (port_id, status, self.build_timeout))
+ message = ('Interface %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (port_id, status, interface_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
return resp, body
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index b23b20b..afa6937 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -103,9 +103,10 @@
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
- message = ('Volume %s failed to reach %s status within '
- 'the required time (%s s).' %
- (volume_id, status, self.build_timeout))
+ message = ('Volume %s failed to reach %s status (current %s) '
+ 'within the required time (%s s).' %
+ (volume_id, status, volume_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index e99c124..ccc20c8 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -80,9 +80,10 @@
timed_out = int(time.time()) - start >= self.build_timeout
if interface_status != status and timed_out:
- message = ('Interface %s failed to reach %s status within '
- 'the required time (%s s).' %
- (port_id, status, self.build_timeout))
+ message = ('Interface %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (port_id, status, interface_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
return resp, body
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 5ad5f37..2c767d9 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -259,6 +259,7 @@
# At this point, the wait has timed out
message = 'Resource %s' % (str(resource))
message += ' failed to reach status %s' % status
+ message += ' (current: %s)' % resource['status']
message += ' within the required time %s' % timeout
caller = misc.find_test_caller()
if caller:
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 15306a0..9b4700a 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -185,9 +185,12 @@
resource_status_reason=body['resource_status_reason'])
if int(time.time()) - start >= self.build_timeout:
- message = ('Resource %s failed to reach %s status within '
- 'the required time (%s s).' %
- (resource_name, status, self.build_timeout))
+ message = ('Resource %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (resource_name,
+ status,
+ resource_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
@@ -214,9 +217,10 @@
stack_status_reason=body['stack_status_reason'])
if int(time.time()) - start >= self.build_timeout:
- message = ('Stack %s failed to reach %s status within '
- 'the required time (%s s).' %
- (stack_name, status, self.build_timeout))
+ message = ('Stack %s failed to reach %s status (current: %s) '
+ 'within the required time (%s s).' %
+ (stack_name, status, stack_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/json/backups_client.py
index da47639..51a017e 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/json/backups_client.py
@@ -95,9 +95,10 @@
raise exceptions.VolumeBackupException(backup_id=backup_id)
if int(time.time()) - start >= self.build_timeout:
- message = ('Volume backup %s failed to reach %s status within '
- 'the required time (%s s).' %
- (backup_id, status, self.build_timeout))
+ message = ('Volume backup %s failed to reach %s status '
+ '(current %s) within the required time (%s s).' %
+ (backup_id, status, backup_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index cf2837b..1e49e5a 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -174,10 +174,12 @@
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
- message = 'Volume %s failed to reach %s status within '\
- 'the required time (%s s).' % (volume_id,
- status,
- self.build_timeout)
+ message = ('Volume %s failed to reach %s status (current: %s) '
+ 'within the required time '
+ '(%s s).' % (volume_id,
+ status,
+ volume_status,
+ self.build_timeout))
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
diff --git a/tempest/test.py b/tempest/test.py
index 7db0376..6deb42b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -224,6 +224,23 @@
class BaseTestCase(testtools.testcase.WithAttributes,
testtools.TestCase):
+ """The test base class defines Tempest framework for class level fixtures.
+ `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
+ by subclasses (enforced via hacking rule T105).
+
+ Set-up is split into a series of steps (setup stages), which can be
+ overridden by test classes. Set-up stages are:
+ - skip_checks
+ - setup_credentials
+ - setup_clients
+ - resource_setup
+
+ Tear-down is also split into a series of steps (teardown stages), which
+ are stacked for execution only if the corresponding setup stage has been
+ reached during the setup phase. Tear-down stages are:
+ - clear_isolated_creds (defined in the base test class)
+ - resource_cleanup
+ """
setUpClassCalled = False
_service = None
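As a sketch of how a subclass is expected to plug into these stages (only the
stage method names come from this change; the configuration flag, the
`get_client_manager` helper and the client attribute names are assumptions)::

    class ExampleComputeTest(test.BaseTestCase):

        @classmethod
        def skip_checks(cls):
            super(ExampleComputeTest, cls).skip_checks()
            if not CONF.service_available.nova:
                raise cls.skipException("Nova is required")

        @classmethod
        def setup_credentials(cls):
            # clear_isolated_creds is stacked for teardown by the framework
            # just before this stage is invoked
            super(ExampleComputeTest, cls).setup_credentials()
            cls.os = cls.get_client_manager()

        @classmethod
        def setup_clients(cls):
            super(ExampleComputeTest, cls).setup_clients()
            cls.keypairs_client = cls.os.keypairs_client

        @classmethod
        def resource_setup(cls):
            # resource_cleanup is stacked for teardown once this stage is
            # reached, so it must tolerate a partial set-up
            super(ExampleComputeTest, cls).resource_setup()
            _, cls.keypair = cls.keypairs_client.create_keypair('example-key')

        @classmethod
        def resource_cleanup(cls):
            if getattr(cls, 'keypair', None):
                cls.keypairs_client.delete_keypair('example-key')
            super(ExampleComputeTest, cls).resource_cleanup()
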
@@ -242,31 +259,28 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
- # No test resource is allocated until here
+ # Stack of (name, callable) to be invoked in reverse order at teardown
+ cls.teardowns = []
+ # All the configuration checks that may generate a skip
+ cls.skip_checks()
try:
- # TODO(andreaf) Split-up resource_setup in stages:
- # skip checks, pre-hook, credentials, clients, resources, post-hook
+ # Allocation of all required credentials and client managers
+ cls.teardowns.append(('credentials', cls.clear_isolated_creds))
+ cls.setup_credentials()
+ # Shortcuts to clients
+ cls.setup_clients()
+ # Additional class-wide test resources
+ cls.teardowns.append(('resources', cls.resource_cleanup))
cls.resource_setup()
except Exception:
etype, value, trace = sys.exc_info()
- LOG.info("%s in resource setup. Invoking tearDownClass." % etype)
- # Catch any exception in tearDown so we can re-raise the original
- # exception at the end
+ LOG.info("%s in %s.setUpClass. Invoking tearDownClass." % (
+ cls.__name__, etype))
+ cls.tearDownClass()
try:
- cls.tearDownClass()
- except Exception as te:
- tetype, _, _ = sys.exc_info()
- # TODO(gmann): Till we split-up resource_setup &
- # resource_cleanup in more structural way, log
- # AttributeError as info instead of exception.
- if tetype is AttributeError:
- LOG.info("tearDownClass failed: %s" % te)
- else:
- LOG.exception("tearDownClass failed: %s" % te)
- try:
- raise etype(value), None, trace
+ raise etype, value, trace
finally:
- del trace # for avoiding circular refs
+ del trace # to avoid circular refs
@classmethod
def tearDownClass(cls):
@@ -274,21 +288,78 @@
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
- try:
- cls.resource_cleanup()
- finally:
- cls.clear_isolated_creds()
+ # Save any existing exception; we always want to re-raise the original
+ # exception only
+ etype, value, trace = sys.exc_info()
+ # If there was no exception during setup we shall re-raise the first
+ # exception in teardown
+ re_raise = (etype is None)
+ while cls.teardowns:
+ name, teardown = cls.teardowns.pop()
+ # Catch any exception in tearDown so we can re-raise the original
+ # exception at the end
+ try:
+ teardown()
+ except Exception as te:
+ sys_exec_info = sys.exc_info()
+ tetype = sys_exec_info[0]
+ # TODO(andreaf): Till we have the ability to clean up only
+ # resources that were successfully set up in resource_cleanup,
+ # log AttributeError as info instead of exception.
+ if tetype is AttributeError and name == 'resources':
+ LOG.info("tearDownClass of %s failed: %s" % (name, te))
+ else:
+ LOG.exception("teardown of %s failed: %s" % (name, te))
+ if not etype:
+ etype, value, trace = sys_exec_info
+ # If exceptions were raised during teardown, and not before, re-raise
+ # the first one
+ if re_raise and etype is not None:
+ try:
+ raise etype, value, trace
+ finally:
+ del trace # to avoid circular refs
@classmethod
def resource_setup(cls):
- """Class level setup steps for test cases.
- Recommended order: skip checks, credentials, clients, resources.
+ """Class level resource setup for test cases.
"""
pass
@classmethod
def resource_cleanup(cls):
- """Class level resource cleanup for test cases. """
+ """Class level resource cleanup for test cases.
+ Resource cleanup must be able to handle the case of partially setup
+ resources, in case a failure during `resource_setup` should happen.
+ """
+ pass
+
+ @classmethod
+ def skip_checks(cls):
+ """Class level skip checks. Subclasses verify in here all
+ conditions that might prevent the execution of the entire test class.
+ Checks implemented here may not make use API calls, and should rely on
+ configuration alone.
+ In general skip checks that require an API call are discouraged.
+ If one is really needed it may be implemented either in the
+ resource_setup or at test level.
+ """
+ pass
+
+ @classmethod
+ def setup_credentials(cls):
+ """Allocate credentials and the client managers from them."""
+ # TODO(andreaf) There is a fair amount of code that could be moved from
+ # base / test classes in here. Ideally tests should be able to only
+ # specify a list of (additional) credentials they need to use.
+ pass
+
+ @classmethod
+ def setup_clients(cls):
+ """Create links to the clients into the test object."""
+ # TODO(andreaf) There is a fair amount of code that could me moved from
+ # base / test classes in here. Ideally tests should be able to only
+ # specify which client is `client` and nothing else.
pass
def setUp(self):
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 6679c79..b672b86 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -261,9 +261,9 @@
def test_verify_extensions_cinder(self):
def fake_list_extensions():
- return (None, {'extensions': [{'name': 'fake1'},
- {'name': 'fake2'},
- {'name': 'not_fake'}]})
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -283,9 +283,9 @@
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
- return (None, {'extensions': [{'name': 'fake1'},
- {'name': 'fake2'},
- {'name': 'not_fake'}]})
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -300,9 +300,8 @@
def test_verify_extensions_nova(self):
def fake_list_extensions():
- return (None, {'extensions': [{'alias': 'fake1'},
- {'alias': 'fake2'},
- {'alias': 'not_fake'}]})
+ return (None, [{'alias': 'fake1'}, {'alias': 'fake2'},
+ {'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
@@ -339,9 +338,9 @@
def test_verify_extensions_nova_v3(self):
def fake_list_extensions():
- return (None, {'extensions': [{'alias': 'fake1'},
- {'alias': 'fake2'},
- {'alias': 'not_fake'}]})
+ return (None, [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_v3_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(