Merge "unskip list server actions tests"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 2ed2582..858fce9 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -109,6 +109,9 @@
# value)
#driver_enabled=false
+# Driver name which Ironic uses (string value)
+#driver=fake
+
# The endpoint type to use for the baremetal provisioning
# service (string value)
#endpoint_type=publicURL
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
index 445ca60..5e1e310 100644
--- a/tempest/api/baremetal/test_drivers.py
+++ b/tempest/api/baremetal/test_drivers.py
@@ -13,14 +13,23 @@
# under the License.
from tempest.api.baremetal import base
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class TestDrivers(base.BaseBaremetalTest):
"""Tests for drivers."""
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(TestDrivers, cls).setUpClass()
+ cls.driver_name = CONF.baremetal.driver
@test.attr(type="smoke")
def test_list_drivers(self):
resp, drivers = self.client.list_drivers()
self.assertEqual('200', resp['status'])
- self.assertIn('fake', [d['name'] for d in drivers['drivers']])
+ self.assertIn(self.driver_name,
+ [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index a307986..d1a8faf 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -85,3 +85,50 @@
self.assertEqual('200', resp['status'])
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+
+ @test.requires_ext(extension='lbaas', service='network')
+ @test.attr(type='gate')
+ def test_lbaas_quotas(self):
+ # Add a tenant to conduct the test
+ test_tenant = data_utils.rand_name('test_tenant_')
+ test_description = data_utils.rand_name('desc_')
+ _, tenant = self.identity_admin_client.create_tenant(
+ name=test_tenant,
+ description=test_description)
+ tenant_id = tenant['id']
+ self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+ # Change lbaas quotas for tenant
+ new_quotas = {'vip': 1, 'pool': 2,
+ 'member': 3, 'health_monitor': 4}
+
+ resp, quota_set = self.admin_client.update_quotas(tenant_id,
+ **new_quotas)
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self.admin_client.reset_quotas, tenant_id)
+ self.assertEqual(1, quota_set['vip'])
+ self.assertEqual(2, quota_set['pool'])
+ self.assertEqual(3, quota_set['member'])
+ self.assertEqual(4, quota_set['health_monitor'])
+ # Confirm our tenant is listed among tenants with non default quotas
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ found = False
+ for qs in non_default_quotas['quotas']:
+ if qs['tenant_id'] == tenant_id:
+ found = True
+ self.assertTrue(found)
+        # Confirm from API that quotas were changed as requested for tenant
+ resp, quota_set = self.admin_client.show_quotas(tenant_id)
+ quota_set = quota_set['quota']
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(1, quota_set['vip'])
+ self.assertEqual(2, quota_set['pool'])
+ self.assertEqual(3, quota_set['member'])
+ self.assertEqual(4, quota_set['health_monitor'])
+ # Reset quotas to default and confirm
+ resp, body = self.admin_client.reset_quotas(tenant_id)
+ self.assertEqual('204', resp['status'])
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ for q in non_default_quotas['quotas']:
+ self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index e0e26da..c897716 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -70,6 +70,27 @@
self.assertTrue(port, msg)
self._confirm_allowed_address_pair(port[0], self.ip_address)
+ @test.attr(type='smoke')
+ def test_update_port_with_address_pair(self):
+ # Create a port without allowed address pair
+ resp, body = self.client.create_port(network_id=self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port_id = body['port']['id']
+ self.addCleanup(self.client.delete_port, port_id)
+
+ # Confirm port is created
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual('200', resp['status'])
+
+ # Update allowed address pair attribute of port
+ allowed_address_pairs = [{'ip_address': self.ip_address,
+ 'mac_address': self.mac_address}]
+        resp, body = self.client.update_port(
+            port_id, allowed_address_pairs=allowed_address_pairs)
+ self.assertEqual('200', resp['status'])
+ newport = body['port']
+ self._confirm_allowed_address_pair(newport, self.ip_address)
+
def _confirm_allowed_address_pair(self, port, ip):
msg = 'Port allowed address pairs should not be empty'
self.assertTrue(port['allowed_address_pairs'], msg)
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index d1fe15c..0cc3f19 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -22,19 +22,15 @@
CONF = config.CONF
-class VPNaaSTestJSON(base.BaseNetworkTest):
+class VPNaaSTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
-
- List VPN Services
- Show VPN Services
- Create VPN Services
- Update VPN Services
- Delete VPN Services
+ List, Show, Create, Delete, and Update VPN Service
List, Show, Create, Delete, and Update IKE policy
+ List, Show, Create, Delete, and Update IPSec policy
"""
@classmethod
@@ -47,11 +43,12 @@
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
- data_utils.rand_name("router-"),
+ data_utils.rand_name("router"),
external_network_id=CONF.network.public_network_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
cls.router['id'])
+
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
@@ -87,6 +84,85 @@
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
+ def _delete_vpn_service(self, vpn_service_id):
+ resp, _ = self.client.delete_vpnservice(vpn_service_id)
+ self.assertEqual('204', resp['status'])
+        # Assert that the vpn service is not found in the list after deletion
+ _, body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertNotIn(vpn_service_id, vpn_services)
+
+ def _get_tenant_id(self):
+ """
+ Returns the tenant_id of the client current user
+ """
+ # TODO(jroovers) This is a temporary workaround to get the tenant_id
+        # of the current client. Replace this once tenant_isolation for
+ # neutron is fixed.
+ _, body = self.client.show_network(self.network['id'])
+ return body['network']['tenant_id']
+
+ @test.attr(type='smoke')
+ def test_admin_create_ipsec_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+ # Create IPSec policy for the newly created tenant
+ name = data_utils.rand_name('ipsec-policy')
+        resp, body = self.admin_client.create_ipsecpolicy(
+            name=name, tenant_id=tenant_id)
+ self.assertEqual('201', resp['status'])
+ ipsecpolicy = body['ipsecpolicy']
+ self.assertIsNotNone(ipsecpolicy['id'])
+ self.addCleanup(self.admin_client.delete_ipsecpolicy,
+ ipsecpolicy['id'])
+
+ # Assert that created ipsec policy is found in API list call
+ _, body = self.client.list_ipsecpolicies()
+ ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
+ self.assertIn(ipsecpolicy['id'], ipsecpolicies)
+
+ @test.attr(type='smoke')
+ def test_admin_create_vpn_service_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create vpn service for the newly created tenant
+ name = data_utils.rand_name('vpn-service')
+ resp, body = self.admin_client.create_vpnservice(
+ subnet_id=self.subnet['id'],
+ router_id=self.router['id'],
+ name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ self.assertEqual('201', resp['status'])
+ vpnservice = body['vpnservice']
+ self.assertIsNotNone(vpnservice['id'])
+ self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
+
+ # Assert that created vpnservice is found in API list call
+ _, body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertIn(vpnservice['id'], vpn_services)
+
+ @test.attr(type='smoke')
+ def test_admin_create_ike_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create IKE policy for the newly created tenant
+ name = data_utils.rand_name('ike-policy')
+        resp, body = self.admin_client.create_ikepolicy(
+            name=name, ike_version="v1",
+            encryption_algorithm="aes-128",
+            auth_algorithm="sha1",
+            tenant_id=tenant_id)
+ self.assertEqual('201', resp['status'])
+ ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
+ self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
+
+ # Assert that created ike policy is found in API list call
+ _, body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertIn(ikepolicy['id'], ikepolicies)
+
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
@@ -97,14 +173,15 @@
@test.attr(type='smoke')
def test_create_update_delete_vpn_service(self):
- # Creates a VPN service
- name = data_utils.rand_name('vpn-service-')
+ # Creates a VPN service and sets up deletion
+ name = data_utils.rand_name('vpn-service')
resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
router_id=self.router['id'],
name=name,
admin_state_up=True)
self.assertEqual('201', resp['status'])
vpnservice = body['vpnservice']
+ self.addCleanup(self._delete_vpn_service, vpnservice['id'])
# Assert if created vpnservices are not found in vpnservices list
resp, body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
@@ -116,14 +193,6 @@
# But precondition is that current state of vpnservice
# should be "ACTIVE" not "PENDING*"
- # Verification of vpn service delete
- resp, body = self.client.delete_vpnservice(vpnservice['id'])
- self.assertEqual('204', resp['status'])
- # Asserting if vpn service is found in the list after deletion
- resp, body = self.client.list_vpnservices()
- vpn_services = [vs['id'] for vs in body['vpnservices']]
- self.assertNotIn(vpnservice['id'], vpn_services)
-
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
@@ -137,6 +206,9 @@
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
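+        # depending on timing, the service may be in any lifecycle state,
+        # so accept every valid status value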
+ valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
+ "PENDING_UPDATE", "PENDING_DELETE"]
+ self.assertIn(vpnservice['status'], valid_status)
@test.attr(type='smoke')
def test_list_ike_policies(self):
@@ -149,7 +221,7 @@
@test.attr(type='smoke')
def test_create_update_delete_ike_policy(self):
# Creates a IKE policy
- name = data_utils.rand_name('ike-policy-')
+ name = data_utils.rand_name('ike-policy')
resp, body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
@@ -157,19 +229,31 @@
auth_algorithm="sha1"))
self.assertEqual('201', resp['status'])
ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
- # Verification of ike policy update
- description = "Updated ike policy"
- new_ike = {'description': description, 'pfs': 'group5',
- 'name': data_utils.rand_name("New-IKE-")}
- resp, body = self.client.update_ikepolicy(ikepolicy['id'],
- **new_ike)
+
+ # Update IKE Policy
+ new_ike = {'name': data_utils.rand_name("New-IKE"),
+ 'description': "Updated ike policy",
+ 'encryption_algorithm': "aes-256",
+ 'ike_version': "v2",
+ 'pfs': "group14",
+ 'lifetime': {'units': "seconds", 'value': 2000}}
+ resp, _ = self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
self.assertEqual('200', resp['status'])
- updated_ike_policy = body['ikepolicy']
- self.assertEqual(updated_ike_policy['description'], description)
+ # Confirm that update was successful by verifying using 'show'
+ _, body = self.client.show_ikepolicy(ikepolicy['id'])
+ ike_policy = body['ikepolicy']
+ for key, value in new_ike.iteritems():
+ self.assertIn(key, ike_policy)
+ self.assertEqual(value, ike_policy[key])
+
# Verification of ike policy delete
- resp, body = self.client.delete_ikepolicy(ikepolicy['id'])
+ resp, _ = self.client.delete_ikepolicy(ikepolicy['id'])
self.assertEqual('204', resp['status'])
+ _, body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertNotIn(ike_policy['id'], ikepolicies)
@test.attr(type='smoke')
def test_show_ike_policy(self):
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index 2519eb5..ad0aa29 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -48,7 +48,7 @@
}
}
-base_update_server = {
+base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index 5e9fbd5..dc4054c 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -43,7 +43,7 @@
}
}
-update_server = copy.deepcopy(servers.base_update_server)
+update_server = copy.deepcopy(servers.base_update_get_server)
update_server['response_body']['properties']['server']['properties'].update({
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
@@ -57,6 +57,39 @@
'hostId'
)
+get_server = copy.deepcopy(servers.base_update_get_server)
+get_server['response_body']['properties']['server']['properties'].update({
+ 'key_name': {'type': ['string', 'null']},
+ 'hostId': {'type': 'string'},
+
+    # NOTE: Non-admin users can also see "OS-SRV-USG" and "OS-EXT-AZ"
+ # attributes.
+ 'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
+ 'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
+ 'OS-EXT-AZ:availability_zone': {'type': 'string'},
+
+    # NOTE: Only admin users can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
+ # attributes.
+ 'OS-EXT-STS:task_state': {'type': ['string', 'null']},
+ 'OS-EXT-STS:vm_state': {'type': 'string'},
+ 'OS-EXT-STS:power_state': {'type': 'integer'},
+ 'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
+ 'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
+ 'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
+ 'os-extended-volumes:volumes_attached': {'type': 'array'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6,
+ 'config_drive': {'type': 'string'}
+})
+get_server['response_body']['properties']['server']['required'].append(
+ # NOTE: OS-SRV-USG, OS-EXT-AZ, OS-EXT-STS, OS-EXT-SRV-ATTR,
+ # os-extended-volumes, OS-DCF and accessIPv4/v6 are API
+    # extensions, and some environments return a response without
+    # these attributes, so they are not 'required'.
+ 'hostId'
+)
+
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
@@ -175,6 +208,20 @@
'status_code': [204]
}
+list_server_groups = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server_groups': {
+ 'type': 'array',
+ 'items': common_server_group
+ }
+ },
+ 'required': ['server_groups']
+ }
+}
+
instance_actions_object = copy.deepcopy(servers.common_instance_actions)
instance_actions_object[
'properties'].update({'instance_uuid': {'type': 'string'}})
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 7572029..3b50516 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -54,7 +54,7 @@
['type', 'mac_addr']
)
-update_server = copy.deepcopy(servers.base_update_server)
+update_server = copy.deepcopy(servers.base_update_get_server)
update_server['response_body']['properties']['server']['properties'].update({
'addresses': addresses_v3,
'host_id': {'type': 'string'},
@@ -68,6 +68,43 @@
'host_id'
)
+get_server = copy.deepcopy(servers.base_update_get_server)
+get_server['response_body']['properties']['server']['properties'].update({
+ 'key_name': {'type': ['string', 'null']},
+ 'host_id': {'type': 'string'},
+
+    # NOTE: Non-admin users can also see "os-server-usage" and
+ # "os-extended-availability-zone" attributes.
+ 'os-server-usage:launched_at': {'type': ['string', 'null']},
+ 'os-server-usage:terminated_at': {'type': ['string', 'null']},
+ 'os-extended-availability-zone:availability_zone': {'type': 'string'},
+
+    # NOTE: Only admin users can see "os-extended-status" and
+ # "os-extended-server-attributes" attributes.
+ 'os-extended-status:task_state': {'type': ['string', 'null']},
+ 'os-extended-status:vm_state': {'type': 'string'},
+ 'os-extended-status:power_state': {'type': 'integer'},
+ 'os-extended-status:locked_by': {'type': ['string', 'null']},
+ 'os-extended-server-attributes:host': {'type': ['string', 'null']},
+ 'os-extended-server-attributes:instance_name': {'type': 'string'},
+ 'os-extended-server-attributes:hypervisor_hostname': {
+ 'type': ['string', 'null']
+ },
+ 'os-extended-volumes:volumes_attached': {'type': 'array'},
+ 'os-pci:pci_devices': {'type': 'array'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6,
+ 'os-config-drive:config_drive': {'type': 'string'}
+})
+get_server['response_body']['properties']['server']['required'].append(
+ # NOTE: os-server-usage, os-extended-availability-zone,
+ # os-extended-status, os-extended-server-attributes,
+ # os-extended-volumes, os-pci, os-access-ips and
+    # os-config-drive are API extensions, and some environments
+    # return a response without these attributes, so they are not 'required'.
+ 'host_id'
+)
+
attach_detach_volume = {
'status_code': [202]
}
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index e9a0cee..946b89e 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -50,13 +50,20 @@
self.assertTableStruct(roles, ['Name', 'Value'])
def test_cinder_backup_list(self):
- self.cinder('backup-list')
+ backup_list = self.parser.listing(self.cinder('backup-list'))
+ self.assertTableStruct(backup_list, ['ID', 'Volume ID', 'Status',
+ 'Name', 'Size', 'Object Count',
+ 'Container'])
def test_cinder_extra_specs_list(self):
- self.cinder('extra-specs-list')
+ extra_specs_list = self.parser.listing(self.cinder('extra-specs-list'))
+ self.assertTableStruct(extra_specs_list, ['ID', 'Name', 'extra_specs'])
def test_cinder_volumes_list(self):
- self.cinder('list')
+        volume_list = self.parser.listing(self.cinder('list'))
+        self.assertTableStruct(volume_list, ['ID', 'Status', 'Name', 'Size',
+                                             'Volume Type', 'Bootable',
+                                             'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
self.assertRaises(subprocess.CalledProcessError,
@@ -85,44 +92,59 @@
self.assertTableStruct(roles, ['Property', 'Value'])
def test_cinder_rate_limits(self):
- self.cinder('rate-limits')
+ rate_limits = self.parser.listing(self.cinder('rate-limits'))
+ self.assertTableStruct(rate_limits, ['Verb', 'URI', 'Value', 'Remain',
+ 'Unit', 'Next_Available'])
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Volume snapshot not available.')
def test_cinder_snapshot_list(self):
- self.cinder('snapshot-list')
+ snapshot_list = self.parser.listing(self.cinder('snapshot-list'))
+ self.assertTableStruct(snapshot_list, ['ID', 'Volume ID', 'Status',
+ 'Name', 'Size'])
def test_cinder_type_list(self):
- self.cinder('type-list')
+ type_list = self.parser.listing(self.cinder('type-list'))
+ self.assertTableStruct(type_list, ['ID', 'Name'])
def test_cinder_list_extensions(self):
- self.cinder('list-extensions')
roles = self.parser.listing(self.cinder('list-extensions'))
self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated'])
def test_cinder_credentials(self):
- self.cinder('credentials')
+ credentials = self.parser.listing(self.cinder('credentials'))
+ self.assertTableStruct(credentials, ['User Credentials', 'Value'])
def test_cinder_availability_zone_list(self):
- self.cinder('availability-zone-list')
+ zone_list = self.parser.listing(self.cinder('availability-zone-list'))
+ self.assertTableStruct(zone_list, ['Name', 'Status'])
def test_cinder_endpoints(self):
- self.cinder('endpoints')
+ endpoints = self.parser.listing(self.cinder('endpoints'))
+ self.assertTableStruct(endpoints, ['nova', 'Value'])
def test_cinder_service_list(self):
- self.cinder('service-list')
+ service_list = self.parser.listing(self.cinder('service-list'))
+ self.assertTableStruct(service_list, ['Binary', 'Host', 'Zone',
+ 'Status', 'State', 'Updated_at',
+ 'Disabled Reason'])
def test_cinder_transfer_list(self):
- self.cinder('transfer-list')
+ transfer_list = self.parser.listing(self.cinder('transfer-list'))
+ self.assertTableStruct(transfer_list, ['ID', 'Volume ID', 'Name'])
def test_cinder_bash_completion(self):
self.cinder('bash-completion')
def test_cinder_qos_list(self):
- self.cinder('qos-list')
+ qos_list = self.parser.listing(self.cinder('qos-list'))
+ self.assertTableStruct(qos_list, ['ID', 'Name', 'Consumer', 'specs'])
def test_cinder_encryption_type_list(self):
- self.cinder('encryption-type-list')
+ encrypt_list = self.parser.listing(self.cinder('encryption-type-list'))
+ self.assertTableStruct(encrypt_list, ['Volume Type ID', 'Provider',
+ 'Cipher', 'Key Size',
+ 'Control Location'])
def test_admin_help(self):
help_text = self.cinder('help')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index c1d58b5..49d079e 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -49,7 +49,8 @@
@test.attr(type='smoke')
def test_neutron_net_list(self):
- self.neutron('net-list')
+ net_list = self.parser.listing(self.neutron('net-list'))
+ self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
@test.attr(type='smoke')
def test_neutron_ext_list(self):
@@ -111,11 +112,14 @@
@test.attr(type='smoke')
@test.requires_ext(extension='external-net', service='network')
def test_neutron_net_external_list(self):
- self.neutron('net-external-list')
+ net_ext_list = self.parser.listing(self.neutron('net-external-list'))
+ self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
@test.attr(type='smoke')
def test_neutron_port_list(self):
- self.neutron('port-list')
+ port_list = self.parser.listing(self.neutron('port-list'))
+ self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
+ 'fixed_ips'])
@test.attr(type='smoke')
@test.requires_ext(extension='quotas', service='network')
@@ -125,7 +129,9 @@
@test.attr(type='smoke')
@test.requires_ext(extension='router', service='network')
def test_neutron_router_list(self):
- self.neutron('router-list')
+ router_list = self.parser.listing(self.neutron('router-list'))
+ self.assertTableStruct(router_list, ['id', 'name',
+ 'external_gateway_info'])
@test.attr(type='smoke')
@test.requires_ext(extension='security-group', service='network')
@@ -136,11 +142,18 @@
@test.attr(type='smoke')
@test.requires_ext(extension='security-group', service='network')
def test_neutron_security_group_rule_list(self):
- self.neutron('security-group-rule-list')
+        security_grp = self.parser.listing(
+            self.neutron('security-group-rule-list'))
+ self.assertTableStruct(security_grp, ['id', 'security_group',
+ 'direction', 'protocol',
+ 'remote_ip_prefix',
+ 'remote_group'])
@test.attr(type='smoke')
def test_neutron_subnet_list(self):
- self.neutron('subnet-list')
+ subnet_list = self.parser.listing(self.neutron('subnet-list'))
+ self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
+ 'allocation_pools'])
@test.attr(type='smoke')
def test_neutron_help(self):
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 6720847..2ab008d 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -50,7 +50,7 @@
def iptables_raw(table):
- return sudo_cmd_call("iptables -v -S -t " + table)
+ return sudo_cmd_call("iptables --line-numbers -L -nv -t " + table)
def ip_ns_list():
diff --git a/tempest/config.py b/tempest/config.py
index 6475844..0796d98 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -942,6 +942,9 @@
cfg.BoolOpt('driver_enabled',
default=False,
help="Whether the Ironic nova-compute driver is enabled"),
+ cfg.StrOpt('driver',
+ default='fake',
+ help="Driver name which Ironic uses"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index e6593db..ca79325 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -22,6 +22,7 @@
import time
from cinderclient import exceptions as cinder_exceptions
+import glanceclient
from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
@@ -84,8 +85,6 @@
cls.orchestration_client = cls.manager.orchestration_client
cls.data_processing_client = cls.manager.data_processing_client
cls.ceilometer_client = cls.manager.ceilometer_client
- cls.resource_keys = {}
- cls.os_resources = []
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -110,72 +109,85 @@
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'identity_admin')
- @staticmethod
- def cleanup_resource(resource, test_name):
+ def setUp(self):
+ super(OfficialClientTest, self).setUp()
+ self.cleanup_waits = []
+        # NOTE(mtreinish) This is safe to do in setUp instead of setUpClass
+ # because scenario tests in the same test class should not share
+ # resources. If resources were shared between test cases then it
+ # should be a single scenario test instead of multiples.
- LOG.debug("Deleting %r from shared resources of %s" %
- (resource, test_name))
+ # NOTE(yfried): this list is cleaned at the end of test_methods and
+ # not at the end of the class
+ self.addCleanup(self._wait_for_cleanups)
+
+ @staticmethod
+ def not_found_exception(exception):
+ """
+ @return: True if exception is of NotFound type
+ """
+ NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
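+        # a 404 status_code is also tolerated as a workaround for
+        # bug 1247568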
+        return (exception.__class__.__name__ in NOT_FOUND_LIST or
+                (hasattr(exception, 'status_code') and
+                 exception.status_code == 404))
+
+ def delete_wrapper(self, thing):
+ """Ignores NotFound exceptions for delete operations.
+
+ @param thing: object with delete() method.
+ OpenStack resources are assumed to have a delete() method which
+ destroys the resource
+ """
+
try:
- # OpenStack resources are assumed to have a delete()
- # method which destroys the resource...
- resource.delete()
+ thing.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
- # - Status code tolerated as a workaround for bug 1247568
- # - HTTPNotFound tolerated as this is currently raised when
- # attempting to delete an already-deleted heat stack.
- if (e.__class__.__name__ in ('NotFound', 'HTTPNotFound') or
- (hasattr(e, 'status_code') and e.status_code == 404)):
- return
- raise
-
- def is_deletion_complete():
- # Deletion testing is only required for objects whose
- # existence cannot be checked via retrieval.
- if isinstance(resource, dict):
- return True
- try:
- resource.get()
- except Exception as e:
- # Clients are expected to return an exception
- # called 'NotFound' if retrieval fails.
- if e.__class__.__name__ == 'NotFound':
- return True
+ if not self.not_found_exception(e):
raise
- return False
- # Block until resource deletion has completed or timed-out
- tempest.test.call_until_true(is_deletion_complete, 10, 1)
+ def _wait_for_cleanups(self):
+ """To handle async delete actions, a list of waits is added
+ which will be iterated over as the last step of clearing the
+ cleanup queue. That way all the delete calls are made up front
+ and the tests won't succeed unless the deletes are eventually
+ successful. This is the same basic approach used in the api tests to
+ limit cleanup execution time except here it is multi-resource,
+ because of the nature of the scenario tests.
+ """
+ for wait in self.cleanup_waits:
+ self.delete_timeout(**wait)
- @classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Because scenario tests are typically run in a
- # specific order, and because test methods in scenario tests
- # generally create resources in a particular order, we destroy
- # resources in the reverse order in which resources are added to
- # the scenario test class object
- while cls.os_resources:
- thing = cls.os_resources.pop()
- cls.cleanup_resource(thing, cls.__name__)
- cls.isolated_creds.clear_isolated_creds()
- super(OfficialClientTest, cls).tearDownClass()
+ def addCleanup_with_wait(self, things, thing_id,
+ error_status='ERROR',
+ exc_type=nova_exceptions.NotFound,
+ cleanup_callable=None, cleanup_args=[],
+ cleanup_kwargs={}):
+ """Adds wait for ansyc resource deletion at the end of cleanups
- @classmethod
- def set_resource(cls, key, thing):
- LOG.debug("Adding %r to shared resources of %s" %
- (thing, cls.__name__))
- cls.resource_keys[key] = thing
- cls.os_resources.append(thing)
-
- @classmethod
- def get_resource(cls, key):
- return cls.resource_keys[key]
-
- @classmethod
- def remove_resource(cls, key):
- thing = cls.resource_keys[key]
- cls.os_resources.remove(thing)
- del cls.resource_keys[key]
+ @param things: type of the resource to delete
+        @param thing_id: id of the resource to delete
+ @param error_status: see manager.delete_timeout()
+ @param exc_type: see manager.delete_timeout()
+        @param cleanup_callable: method to pass to self.addCleanup with
+            the given *cleanup_args and **cleanup_kwargs;
+            usually a delete method. If not given, defaults to:
+            things.delete(thing_id)
+ """
+ if cleanup_callable is None:
+ LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
+ " default".format(rclass=things, id=thing_id))
+ self.addCleanup(things.delete, thing_id)
+ else:
+ self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
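+        # record the wait so _wait_for_cleanups() blocks at the end of
+        # the cleanup queue until the resource is actually gone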
+ wait_dict = {
+ 'things': things,
+ 'thing_id': thing_id,
+ 'error_status': error_status,
+ 'not_found_exception': exc_type,
+ }
+ self.cleanup_waits.append(wait_dict)
def status_timeout(self, things, thing_id, expected_status,
error_status='ERROR',
@@ -227,8 +239,11 @@
except not_found_exception:
if allow_notfound:
return True
- else:
- raise
+ raise
+ except Exception as e:
+ if allow_notfound and self.not_found_exception(e):
+ return True
+ raise
new_status = thing.status
@@ -288,6 +303,7 @@
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
+ self.addCleanup(self.delete_wrapper, sg_rule)
rules.append(sg_rule)
return rules
@@ -301,7 +317,7 @@
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
@@ -309,7 +325,17 @@
return secgroup
def create_server(self, client=None, name=None, image=None, flavor=None,
- wait=True, create_kwargs={}):
+ wait_on_boot=True, wait_on_delete=True,
+ create_kwargs={}):
+ """Creates VM instance.
+
+ @param client: compute client to create the instance
+ @param image: image from which to create the instance
+        @param wait_on_boot: wait for status ACTIVE before continuing
+ @param wait_on_delete: force synchronous delete on cleanup
+ @param create_kwargs: additional details for instance creation
+ @return: client.server object
+ """
if client is None:
client = self.compute_client
if name is None:
@@ -343,19 +369,25 @@
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
- self.set_resource(name, server)
- if wait:
+ if wait_on_delete:
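+            # cleanups run in LIFO order: the delete registered below is
+            # issued first, then this wait blocks until the server is gone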
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup_with_wait(self.compute_client.servers, server.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[server])
+ if wait_on_boot:
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = client.servers.get(server.id)
- self.set_resource(name, server)
LOG.debug("Created server: %s", server)
return server
def create_volume(self, client=None, size=1, name=None,
- snapshot_id=None, imageRef=None, volume_type=None):
+ snapshot_id=None, imageRef=None, volume_type=None,
+ wait_on_delete=True):
if client is None:
client = self.volume_client
if name is None:
@@ -365,7 +397,12 @@
snapshot_id=snapshot_id,
imageRef=imageRef,
volume_type=volume_type)
- self.set_resource(name, volume)
+ if wait_on_delete:
+ self.addCleanup(self.delete_timeout,
+ self.volume_client.volumes,
+ volume.id)
+ self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
+ exc_type=cinder_exceptions.NotFound)
self.assertEqual(name, volume.display_name)
self.status_timeout(client.volumes, volume.id, 'available')
LOG.debug("Created volume: %s", volume)
@@ -381,7 +418,8 @@
name = data_utils.rand_name('scenario-snapshot-')
LOG.debug("Creating a snapshot image for server: %s", server.name)
image_id = compute_client.servers.create_image(server, name)
- self.addCleanup(image_client.images.delete, image_id)
+ self.addCleanup_with_wait(self.image_client.images, image_id,
+ exc_type=glanceclient.exc.HTTPNotFound)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
self.assertEqual(name, snapshot_image.name)
@@ -396,7 +434,7 @@
name = data_utils.rand_name('scenario-keypair-')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
- self.set_resource(name, keypair)
+ self.addCleanup(self.delete_wrapper, keypair)
return keypair
def get_remote_client(self, server_or_ip, username=None, private_key=None):
@@ -590,9 +628,12 @@
'key_name': self.keypair.id
}
self.instance = self.create_server(
- wait=False, create_kwargs=create_kwargs)
+ wait_on_boot=False, create_kwargs=create_kwargs)
- self.set_resource('instance', self.instance)
+ self.addCleanup_with_wait(self.compute_client.servers,
+ self.instance.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.instance])
self.wait_node(self.instance.id)
self.node = self.get_node(instance_id=self.instance.id)
@@ -617,7 +658,6 @@
def terminate_instance(self):
self.instance.delete()
- self.remove_resource('instance')
self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node.uuid,
@@ -643,11 +683,6 @@
self.status_timeout(
self.volume_client.volumes, self.volume.id, status)
- def _wait_for_volume_deletion(self):
- self.delete_timeout(
- self.volume_client.volumes, self.volume.id,
- not_found_exception=cinder_exceptions.NotFound)
-
def nova_boot(self):
self.keypair = self.create_keypair()
create_kwargs = {'key_name': self.keypair.name}
@@ -698,10 +733,6 @@
volume = self.volume_client.volumes.get(self.volume.id)
self.assertEqual('available', volume.status)
- def cinder_delete_encrypted(self):
- self.volume_client.volumes.delete(self.volume.id)
- self._wait_for_volume_deletion()
-
class NetworkScenarioTest(OfficialClientTest):
"""
@@ -740,7 +771,7 @@
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
- self.set_resource(name, network)
+ self.addCleanup(self.delete_wrapper, network)
return network
def _list_networks(self, **kwargs):
@@ -816,7 +847,7 @@
subnet = net_common.DeletableSubnet(client=self.network_client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
- self.set_resource(data_utils.rand_name(namestart), subnet)
+ self.addCleanup(self.delete_wrapper, subnet)
return subnet
def _create_port(self, network, namestart='port-quotatest-'):
@@ -829,7 +860,7 @@
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
- self.set_resource(name, port)
+ self.addCleanup(self.delete_wrapper, port)
return port
def _get_server_port_id(self, server, ip_addr=None):
@@ -852,7 +883,7 @@
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
- self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
@@ -897,7 +928,7 @@
pool = net_common.DeletablePool(client=self.network_client,
**resp['pool'])
self.assertEqual(pool['name'], name)
- self.set_resource(name, pool)
+ self.addCleanup(self.delete_wrapper, pool)
return pool
def _create_member(self, address, protocol_port, pool_id):
@@ -912,7 +943,7 @@
resp = self.network_client.create_member(body)
member = net_common.DeletableMember(client=self.network_client,
**resp['member'])
- self.set_resource(data_utils.rand_name('member-'), member)
+ self.addCleanup(self.delete_wrapper, member)
return member
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
@@ -931,7 +962,7 @@
vip = net_common.DeletableVip(client=self.network_client,
**resp['vip'])
self.assertEqual(vip['name'], name)
- self.set_resource(name, vip)
+ self.addCleanup(self.delete_wrapper, vip)
return vip
def _check_vm_connectivity(self, ip_address,
@@ -1073,7 +1104,7 @@
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
return secgroup
def _default_security_group(self, tenant_id, client=None):
@@ -1132,6 +1163,7 @@
client=client,
**sg_rule['security_group_rule']
)
+ self.addCleanup(self.delete_wrapper, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
@@ -1230,7 +1262,7 @@
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
- self.set_resource(name, router)
+ self.addCleanup(self.delete_wrapper, router)
return router
def _create_networks(self, tenant_id=None):
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 82ba3c5..aa7b6f8 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -59,7 +59,7 @@
# if a keypair was set, do not delete the stack on exit to allow
# for manual post-mortums
if not CONF.orchestration.keypair_name:
- self.set_resource('stack', self.stack)
+ self.addCleanup(self.client.stacks.delete, self.stack)
@test.skip_because(bug="1257575")
@test.attr(type='slow')
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 6817c48..0059619 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -42,13 +42,12 @@
availability_zone = kwargs['availability_zone']
self.assertEqual(aggregate.name, aggregate_name)
self.assertEqual(aggregate.availability_zone, availability_zone)
- self.set_resource(aggregate.id, aggregate)
+ self.addCleanup(self._delete_aggregate, aggregate)
LOG.debug("Aggregate %s created." % (aggregate.name))
return aggregate
def _delete_aggregate(self, aggregate):
self.compute_client.aggregates.delete(aggregate.id)
- self.remove_resource(aggregate.id)
LOG.debug("Aggregate %s deleted. " % (aggregate.name))
def _get_host_name(self):
@@ -60,6 +59,7 @@
def _add_host(self, aggregate_name, host):
aggregate = self.compute_client.aggregates.add_host(aggregate_name,
host)
+ self.addCleanup(self._remove_host, aggregate, host)
self.assertIn(host, aggregate.hosts)
LOG.debug("Host %s added to Aggregate %s." % (host, aggregate.name))
@@ -128,6 +128,3 @@
metadata.update(additional_metadata)
self._check_aggregate_details(aggregate, aggregate.name, az, [host],
metadata)
-
- self._remove_host(aggregate, host)
- self._delete_aggregate(aggregate)
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index f223cbf..366cd93 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -29,7 +29,6 @@
* Creates an encryption type (as admin)
* Creates a volume of that encryption type (as a regular user)
* Attaches and detaches the encrypted volume to the instance
- * Deletes the encrypted volume
"""
def launch_instance(self):
@@ -49,21 +48,16 @@
self.nova_volume_attach()
self.nova_volume_detach()
- def delete_volume(self):
- self.cinder_delete_encrypted()
-
@test.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
self.launch_instance()
self.create_encrypted_volume('nova.volume.encryptors.'
'luks.LuksEncryptor')
self.attach_detach_volume()
- self.delete_volume()
@test.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
self.launch_instance()
self.create_encrypted_volume('nova.volume.encryptors.'
'cryptsetup.CryptsetupEncryptor')
- self.attach_detach_volume()
- self.delete_volume()
+ self.attach_detach_volume()
\ No newline at end of file
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index ed5743c..15cf13b 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -60,7 +60,13 @@
# needed because of bug 1199788
self.servers = [x for x in client.servers.list() if name in x.name]
for server in self.servers:
- self.set_resource(server.name, server)
+            # after all servers are deleted, wait for them all to clear
+            # before cleanup continues
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ for server in self.servers:
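+            # registered last, so these deletes run first during cleanup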
+ self.addCleanup_with_wait(self.compute_client.servers, server.id)
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 826da48..e041fd2 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -74,15 +74,11 @@
self.server_fixed_ips = {}
self._create_security_group()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def _create_security_group(self):
self.security_group = self._create_security_group_neutron(
tenant_id=self.tenant_id)
self._create_security_group_rules_for_port(self.port1)
self._create_security_group_rules_for_port(self.port2)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
def _create_security_group_rules_for_port(self, port):
rule = {
@@ -99,7 +95,6 @@
def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
@@ -111,14 +106,12 @@
}
server = self.create_server(name=name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
self.servers_keypairs[server.id] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
@@ -211,7 +204,6 @@
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet.id)
- self.addCleanup(self.cleanup_wrapper, self.pool)
self.assertTrue(self.pool)
def _create_members(self):
@@ -227,17 +219,14 @@
member1 = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member1)
member2 = self._create_member(address=ip,
protocol_port=self.port2,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member2)
self.members.extend([member1, member2])
else:
member = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member)
self.members.append(member)
self.assertTrue(self.members)
@@ -246,7 +235,6 @@
port_id = vip.port_id
floating_ip = self._create_floating_ip(vip, public_network_id,
port_id=port_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(vip.id, [])
self.floating_ips[vip.id].append(floating_ip)
@@ -257,7 +245,6 @@
protocol_port=80,
subnet_id=self.subnet.id,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, self.vip)
self.status_timeout(NeutronRetriever(self.network_client,
self.network_client.vip_path,
net_common.DeletableVip),
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 0406217..29fdc74 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -85,7 +85,7 @@
def nova_floating_ip_create(self):
self.floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, self.floating_ip)
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f1cd320..431de9a 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -50,23 +50,15 @@
cls.enabled = False
raise cls.skipException(msg)
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkAdvancedServerOps, self).setUp()
key_name = data_utils.rand_name('keypair-smoke-')
self.keypair = self.create_keypair(name=key_name)
- self.addCleanup(self.cleanup_wrapper, self.keypair)
security_group =\
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, security_group)
network = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, network)
router = self._get_router(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, router)
subnet = self._create_subnet(network)
- self.addCleanup(self.cleanup_wrapper, subnet)
subnet.add_to_router(router.id)
public_network_id = CONF.network.public_network_id
create_kwargs = {
@@ -79,10 +71,8 @@
server_name = data_utils.rand_name('server-smoke-%d-')
self.server = self.create_server(name=server_name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, self.server)
self.floating_ip = self._create_floating_ip(self.server,
public_network_id)
- self.addCleanup(self.cleanup_wrapper, self.floating_ip)
def _check_network_connectivity(self, should_connect=True):
username = CONF.compute.image_ssh_user
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c84d4b9..7dc817d 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -96,17 +96,11 @@
raise cls.skipException(msg)
cls.check_preconditions()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.security_group = \
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
self.network, self.subnet, self.router = self._create_networks()
- for r in [self.network, self.router, self.subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
self.check_networks()
self.servers = {}
name = data_utils.rand_name('server-smoke')
@@ -144,7 +138,6 @@
def _create_server(self, name, network):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
create_kwargs = {
'nics': [
@@ -154,7 +147,6 @@
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return dict(server=server, keypair=keypair)
def _check_tenant_network_connectivity(self):
@@ -171,7 +163,6 @@
for server in self.servers.keys():
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
@@ -204,11 +195,9 @@
def _create_new_network(self):
self.new_net = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.new_net)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
- self.addCleanup(self.cleanup_wrapper, self.new_subnet)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
@@ -226,7 +215,10 @@
port_id=None,
fixed_ip=None)
# move server to the head of the cleanup list
- self.addCleanup(self.cleanup_wrapper, server)
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup(self.delete_wrapper, server)
def check_ports():
port_list = [port for port in
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index dd89dc0..8058b3d 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -165,7 +165,6 @@
def _create_tenant_keypairs(self, tenant_id):
keypair = self.create_keypair(
name=data_utils.rand_name('keypair-smoke-'))
- self.addCleanup(self.cleanup_wrapper, keypair)
self.tenants[tenant_id].keypair = keypair
def _create_tenant_security_groups(self, tenant):
@@ -173,14 +172,12 @@
namestart='secgroup_access-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, access_sg)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, def_sg)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
protocol='tcp',
@@ -188,9 +185,7 @@
port_range_max=22,
direction='ingress',
)
- rule = self._create_security_group_rule(secgroup=access_sg,
- **ssh_rule)
- self.addCleanup(self.cleanup_wrapper, rule)
+ self._create_security_group_rule(secgroup=access_sg, **ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
@@ -238,7 +233,6 @@
'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return server
def _create_tenant_servers(self, tenant, num=1):
@@ -269,13 +263,10 @@
def _assign_floating_ips(self, server):
public_network_id = CONF.network.public_network_id
floating_ip = self._create_floating_ip(server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(server, floating_ip)
def _create_tenant_network(self, tenant):
network, subnet, router = self._create_networks(tenant.creds.tenant_id)
- for r in [network, router, subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
tenant.set_network(network, subnet, router)
def _set_compute_context(self, tenant):
@@ -355,11 +346,10 @@
remote_group_id=tenant.security_groups['default'].id,
direction='ingress'
)
- rule = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule)
access_point_ssh = self._connect_to_access_point(tenant)
for server in tenant.servers:
self._check_connectivity(access_point=access_point_ssh,
@@ -385,11 +375,10 @@
protocol='icmp',
direction='ingress'
)
- rule_s2d = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_s2d)
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
@@ -399,11 +388,10 @@
self._test_cross_tenant_block(dest_tenant, source_tenant)
# allow reverse traffic and check
- rule_d2s = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_d2s)
access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
ip = self._get_server_ip(source_tenant.access_point,
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 54f1d9e..38686d9 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -74,23 +74,17 @@
'key_name': self.keypair.id,
'security_groups': security_groups
}
- instance = self.create_server(image=self.image_ref,
- flavor=self.flavor_ref,
- create_kwargs=create_kwargs)
- self.set_resource('instance', instance)
-
- def terminate_instance(self):
- instance = self.get_resource('instance')
- instance.delete()
- self.remove_resource('instance')
+ self.instance = self.create_server(image=self.image_ref,
+ flavor=self.flavor_ref,
+ create_kwargs=create_kwargs)
def verify_ssh(self):
if self.run_ssh:
# Obtain a floating IP
floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(self.delete_wrapper, floating_ip)
# Attach a floating IP
- instance = self.get_resource('instance')
- instance.add_floating_ip(floating_ip)
+ self.instance.add_floating_ip(floating_ip)
# Check ssh
try:
self.get_remote_client(
@@ -108,4 +102,4 @@
self.security_group = self._create_security_group_nova()
self.boot_instance()
self.verify_ssh()
- self.terminate_instance()
+ self.instance.delete()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d41490a..7dd662d 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -65,7 +65,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _set_floating_ip_to_server(self, server, floating_ip):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 20561ae..be27024 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -74,7 +74,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _add_floating_ip(self, server, floating_ip):
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 4905dbf..bf5d1f6 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from cinderclient import exceptions as cinder_exc
+
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
@@ -69,7 +71,8 @@
snap = volume_snapshots.create(volume_id=vol_id,
force=True,
display_name=snap_name)
- self.set_resource(snap.id, snap)
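+        # snapshot deletion is asynchronous: register the delete and a
+        # wait so cleanup blocks until the snapshot is gone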
+ self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
+ exc_type=cinder_exc.NotFound)
self.status_timeout(volume_snapshots,
snap.id,
'available')
@@ -100,8 +103,7 @@
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
floating_ip = self.compute_client.floating_ips.create()
- fip_name = data_utils.rand_name('scenario-fip')
- self.set_resource(fip_name, floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
server.add_floating_ip(floating_ip)
ip = floating_ip.ip
else:
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index e1661c0..69d2f35 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -134,6 +134,7 @@
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
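+        # validate the GET /servers/{id} response against the get_server
+        # JSON schema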
+ self.validate_response(schema.get_server, resp, body)
return resp, body['server']
def delete_server(self, server_id):
@@ -524,6 +525,7 @@
"""List the server-groups."""
resp, body = self.get("os-server-groups")
body = json.loads(body)
+ self.validate_response(schema.list_server_groups, resp, body)
return resp, body['server_groups']
def get_server_group(self, server_group_id):
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 11258a6..d933998 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -136,6 +136,7 @@
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
+ self.validate_response(schema.get_server, resp, body)
return resp, body['server']
def delete_server(self, server_id):
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 81792c4..4ee8302 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -28,6 +28,7 @@
'vips': 'lb',
'health_monitors': 'lb',
'members': 'lb',
+ 'ipsecpolicies': 'vpn',
'vpnservices': 'vpn',
'ikepolicies': 'vpn',
@@ -47,6 +48,7 @@
resource_plural_map = {
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
+ 'ipsecpolicy': 'ipsecpolicies',
'ikepolicy': 'ikepolicies',
'quotas': 'quotas',
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
index bdb9269..1e2925b 100644
--- a/tempest/tests/test_commands.py
+++ b/tempest/tests/test_commands.py
@@ -47,7 +47,8 @@
@mock.patch('subprocess.Popen')
def test_iptables_raw(self, mock):
table = 'filter'
- expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+ expected = ['/usr/bin/sudo', '-n', 'iptables', '--line-numbers',
+ '-L', '-nv', '-t',
'%s' % table]
commands.iptables_raw(table)
mock.assert_called_once_with(expected, **self.subprocess_args)