Merge "Use image minDisk as volume size when necessary"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 1095e77..27d65e6 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -432,6 +432,11 @@
# value)
#attach_encrypted_volume = true
+# Does the test environment support creating instances with multiple
+# ports on the same network? This is only valid when using Neutron.
+# (boolean value)
+#allow_duplicate_networks = false
+
[dashboard]
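Note: the new option sits in the [compute-feature-enabled] section of tempest.conf (the same group as attach_encrypted_volume above) and is read through tempest's CONF object. A minimal sketch of how a test can be guarded on it, mirroring the decorators added to test_create_server.py further down in this change; the test class and body here are only illustrative:

import testtools

from tempest import config
from tempest import test

CONF = config.CONF


class DuplicateNetworkGuardExample(test.BaseTestCase):

    @testtools.skipUnless(CONF.compute_feature_enabled.allow_duplicate_networks,
                          'Duplicate networks must be allowed')
    def test_example(self):
        # Runs only when allow_duplicate_networks = true is set in the
        # [compute-feature-enabled] section of tempest.conf.
        pass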
diff --git a/setup.cfg b/setup.cfg
index 5c78632..36b2270 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,12 @@
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
+[files]
+packages =
+ tempest
+data_files =
+ /etc/tempest = etc/*
+
[entry_points]
console_scripts =
verify-tempest-config = tempest.cmd.verify_tempest_config:main
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 91e55d6..9334fb6 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -106,7 +106,7 @@
# set the metadata of the aggregate
meta = {"key": "value"}
- body = self.client.set_metadata(aggregate['id'], meta)
+ body = self.client.set_metadata(aggregate['id'], metadata=meta)
self.assertEqual(meta, body["metadata"])
# verify the metadata has been set
@@ -130,9 +130,10 @@
new_aggregate_name = aggregate_name + '_new'
new_az_name = az_name + '_new'
- resp_aggregate = self.client.update_aggregate(aggregate_id,
- new_aggregate_name,
- new_az_name)
+ resp_aggregate = self.client.update_aggregate(
+ aggregate_id,
+ name=new_aggregate_name,
+ availability_zone=new_az_name)
self.assertEqual(new_aggregate_name, resp_aggregate['name'])
self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
@@ -150,13 +151,13 @@
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- body = self.client.add_host(aggregate['id'], self.host)
+ body = self.client.add_host(aggregate['id'], host=self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertIn(self.host, body['hosts'])
- body = self.client.remove_host(aggregate['id'], self.host)
+ body = self.client.remove_host(aggregate['id'], host=self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
@@ -169,8 +170,9 @@
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- self.client.add_host(aggregate['id'], self.host)
- self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+ self.client.add_host(aggregate['id'], host=self.host)
+ self.addCleanup(self.client.remove_host, aggregate['id'],
+ host=self.host)
aggregates = self.client.list_aggregates()
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
@@ -187,8 +189,9 @@
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- self.client.add_host(aggregate['id'], self.host)
- self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+ self.client.add_host(aggregate['id'], host=self.host)
+ self.addCleanup(self.client.remove_host, aggregate['id'],
+ host=self.host)
body = self.client.show_aggregate(aggregate['id'])
self.assertEqual(aggregate_name, body['name'])
@@ -204,8 +207,9 @@
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- self.client.add_host(aggregate['id'], self.host)
- self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+ self.client.add_host(aggregate['id'], host=self.host)
+ self.addCleanup(self.client.remove_host, aggregate['id'],
+ host=self.host)
server_name = data_utils.rand_name('test_server')
admin_servers_client = self.os_adm.servers_client
server = self.create_test_server(name=server_name,
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index 74a8547..231c88f 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -143,7 +143,7 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.NotFound, self.client.add_host,
- aggregate['id'], non_exist_host)
+ aggregate['id'], host=non_exist_host)
@test.attr(type=['negative'])
@test.idempotent_id('7324c334-bd13-4c93-8521-5877322c3d51')
@@ -155,7 +155,7 @@
self.assertRaises(lib_exc.Forbidden,
self.user_client.add_host,
- aggregate['id'], self.host)
+ aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
@@ -165,11 +165,12 @@
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- self.client.add_host(aggregate['id'], self.host)
- self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+ self.client.add_host(aggregate['id'], host=self.host)
+ self.addCleanup(self.client.remove_host, aggregate['id'],
+ host=self.host)
self.assertRaises(lib_exc.Conflict, self.client.add_host,
- aggregate['id'], self.host)
+ aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
@@ -179,12 +180,13 @@
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
- self.client.add_host(aggregate['id'], self.host)
- self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
+ self.client.add_host(aggregate['id'], host=self.host)
+ self.addCleanup(self.client.remove_host, aggregate['id'],
+ host=self.host)
self.assertRaises(lib_exc.Forbidden,
self.user_client.remove_host,
- aggregate['id'], self.host)
+ aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
@@ -195,4 +197,4 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.NotFound, self.client.remove_host,
- aggregate['id'], non_exist_host)
+ aggregate['id'], host=non_exist_host)
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index 1c9e5d7..204281c 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -50,7 +50,7 @@
def test_list_usage_all_tenants(self):
# Get usage for all tenants
tenant_usage = self.adm_client.list_tenant_usages(
- start=self.start, end=self.end, detailed=int(bool(True)))
+ start=self.start, end=self.end, detailed="1")
self.assertEqual(len(tenant_usage), 8)
@test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index 934fc31..e9b4ad4 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -66,6 +66,6 @@
# Get usage for all tenants with non admin user
params = {'start': self.start,
'end': self.end,
- 'detailed': int(bool(True))}
+ 'detailed': "1"}
self.assertRaises(lib_exc.Forbidden,
self.client.list_tenant_usages, **params)
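For context, the detailed flag is forwarded into the request's query string, so both tests now pass the literal string the API expects instead of coercing a boolean through int(). A minimal sketch of the resulting call; adm_client stands for the admin tenant-usages client used above:

def list_detailed_usage(adm_client, start, end):
    # 'detailed' ends up as ?detailed=1 in the query string, so the string
    # form is passed straight through rather than int(bool(True)).
    return adm_client.list_tenant_usages(start=start, end=end, detailed="1")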
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 39447b8..23a9cb3 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -65,6 +65,20 @@
cls.password = cls.server_initial['adminPass']
cls.server = cls.client.show_server(cls.server_initial['id'])
+ def _create_net_subnet_ret_net_from_cidr(self, cidr):
+ name_net = data_utils.rand_name(self.__class__.__name__)
+ net = self.network_client.create_network(name=name_net)
+ self.addCleanup(self.network_client.delete_network,
+ net['network']['id'])
+
+ subnet = self.network_client.create_subnet(
+ network_id=net['network']['id'],
+ cidr=cidr,
+ ip_version=4)
+ self.addCleanup(self.network_client.delete_subnet,
+ subnet['subnet']['id'])
+ return net
+
@test.attr(type='smoke')
@test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
def test_verify_server_details(self):
@@ -147,29 +161,8 @@
def test_verify_multiple_nics_order(self):
# Verify that the order of the networks given at server creation is
# preserved within the server.
- name_net1 = data_utils.rand_name(self.__class__.__name__)
- net1 = self.network_client.create_network(name=name_net1)
- self.addCleanup(self.network_client.delete_network,
- net1['network']['id'])
-
- name_net2 = data_utils.rand_name(self.__class__.__name__)
- net2 = self.network_client.create_network(name=name_net2)
- self.addCleanup(self.network_client.delete_network,
- net2['network']['id'])
-
- subnet1 = self.network_client.create_subnet(
- network_id=net1['network']['id'],
- cidr='19.80.0.0/24',
- ip_version=4)
- self.addCleanup(self.network_client.delete_subnet,
- subnet1['subnet']['id'])
-
- subnet2 = self.network_client.create_subnet(
- network_id=net2['network']['id'],
- cidr='19.86.0.0/24',
- ip_version=4)
- self.addCleanup(self.network_client.delete_subnet,
- subnet2['subnet']['id'])
+ net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+ net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
networks = [{'uuid': net1['network']['id']},
{'uuid': net2['network']['id']}]
@@ -196,13 +189,50 @@
# other times ['19.80.0.3', '19.86.0.3']. So we check that the first
# address is in the first network and, similarly, that the second
# address is in the second network.
- addr = [addresses[name_net1][0]['addr'],
- addresses[name_net2][0]['addr']]
+ addr = [addresses[net1['network']['name']][0]['addr'],
+ addresses[net2['network']['name']][0]['addr']]
networks = [netaddr.IPNetwork('19.80.0.0/24'),
netaddr.IPNetwork('19.86.0.0/24')]
for address, network in zip(addr, networks):
self.assertIn(address, network)
+ @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
+ @testtools.skipUnless(CONF.service_available.neutron,
+ 'Neutron service must be available.')
+ # The below skipUnless should be removed once Kilo-eol happens.
+ @testtools.skipUnless(CONF.compute_feature_enabled.
+ allow_duplicate_networks,
+ 'Duplicate networks must be allowed')
+ def test_verify_duplicate_network_nics(self):
+ # Verify that server creation does not fail when more than one nic
+ # is created on the same network.
+ net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
+ net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+
+ networks = [{'uuid': net1['network']['id']},
+ {'uuid': net2['network']['id']},
+ {'uuid': net1['network']['id']}]
+
+ server_multi_nics = self.create_test_server(
+ networks=networks, wait_until='ACTIVE')
+
+ def cleanup_server():
+ self.client.delete_server(server_multi_nics['id'])
+ self.client.wait_for_server_termination(server_multi_nics['id'])
+
+ self.addCleanup(cleanup_server)
+
+ addresses = self.client.list_addresses(server_multi_nics['id'])
+
+ addr = [addresses[net1['network']['name']][0]['addr'],
+ addresses[net2['network']['name']][0]['addr'],
+ addresses[net1['network']['name']][1]['addr']]
+ networks = [netaddr.IPNetwork('19.80.0.0/24'),
+ netaddr.IPNetwork('19.86.0.0/24'),
+ netaddr.IPNetwork('19.80.0.0/24')]
+ for address, network in zip(addr, networks):
+ self.assertIn(address, network)
+
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
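The assertions in the two tests above lean on the shape of list_addresses(): a mapping keyed by network name whose values list the attached addresses in NIC order. For the duplicate-network case that looks roughly like the sketch below; the names and addresses are illustrative and fields other than 'addr' are omitted:

addresses = {
    'tempest-net-1': [{'addr': '19.80.0.3'},   # first requested NIC, on net1
                      {'addr': '19.80.0.4'}],  # third requested NIC, also on net1
    'tempest-net-2': [{'addr': '19.86.0.3'}],  # second requested NIC, on net2
}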
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index cb113a7..63395cc 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -61,7 +61,7 @@
# Change quotas for tenant
quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
+ **new_quotas)['quota']
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
for key, value in six.iteritems(new_quotas):
self.assertEqual(value, quota_set[key])
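The added ['quota'] index matches the network client change later in this diff: update_quotas() now returns the full response body via _update_resource() instead of unwrapping it, so the updated values sit under the 'quota' key. Roughly, with hypothetical values:

# Illustrative shape of the new return value; the test unwraps 'quota' itself.
quota_set = {'quota': {'network': 100, 'subnet': 100, 'port': 50}}
assert quota_set['quota']['network'] == 100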
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 4f54562..6a8fbec 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -156,26 +156,21 @@
network = self.create_network()
subnet = self.create_subnet(network)
self.addCleanup(self.client.delete_subnet, subnet['id'])
- # Create two ports specifying a fixed_ips
- address = self._get_ipaddress_from_tempest_conf()
- _fixed_ip_1 = str(address + 3)
- _fixed_ip_2 = str(address + 4)
- fixed_ips_1 = [{'ip_address': _fixed_ip_1}]
- port_1 = self.client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips_1)
+ # Create two ports
+ port_1 = self.client.create_port(network_id=network['id'])
self.addCleanup(self.client.delete_port, port_1['port']['id'])
- fixed_ips_2 = [{'ip_address': _fixed_ip_2}]
- port_2 = self.client.create_port(network_id=network['id'],
- fixed_ips=fixed_ips_2)
+ port_2 = self.client.create_port(network_id=network['id'])
self.addCleanup(self.client.delete_port, port_2['port']['id'])
# List ports filtered by fixed_ips
- fixed_ips = 'ip_address=' + _fixed_ip_1
+ port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
+ fixed_ips = 'ip_address=' + port_1_fixed_ip
port_list = self.client.list_ports(fixed_ips=fixed_ips)
+ # Check that we got the desired port
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port_1['port']['id'])
self.assertEqual(ports[0]['fixed_ips'][0]['ip_address'],
- _fixed_ip_1)
+ port_1_fixed_ip)
self.assertEqual(ports[0]['network_id'], network['id'])
@test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
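As shown above, the fixed_ips filter is a single 'ip_address=<ip>' string, now built from whichever address Neutron actually allocated to the port instead of an address derived from the CIDR in tempest.conf. A small sketch of the same lookup; the client is assumed to be the ports client used in the test:

def list_ports_by_first_ip(client, port):
    # Filter the port listing by the fixed IP Neutron assigned to this port.
    ip = port['port']['fixed_ips'][0]['ip_address']
    return client.list_ports(fixed_ips='ip_address=' + ip)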
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 2c545a7..8015c35 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -38,6 +38,10 @@
cls.volume = cls.create_volume()
+ def _delete_backup(self, backup_id):
+ self.backups_adm_client.delete_backup(backup_id)
+ self.backups_adm_client.wait_for_backup_deletion(backup_id)
+
@test.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
def test_volume_backup_create_get_detailed_list_restore_delete(self):
# Create backup
@@ -74,6 +78,52 @@
self.admin_volume_client.wait_for_volume_status(
restore['volume_id'], 'available')
+ @test.idempotent_id('a99c54a1-dd80-4724-8a13-13bf58d4068d')
+ def test_volume_backup_export_import(self):
+ # Create backup
+ backup_name = data_utils.rand_name('Backup')
+ backup = self.backups_adm_client.create_backup(self.volume['id'],
+ name=backup_name)
+ self.addCleanup(self._delete_backup, backup['id'])
+ self.assertEqual(backup_name, backup['name'])
+ self.backups_adm_client.wait_for_backup_status(backup['id'],
+ 'available')
+
+ # Export Backup
+ export_backup = self.backups_adm_client.export_backup(backup['id'])
+ self.assertIn('backup_service', export_backup)
+ self.assertIn('backup_url', export_backup)
+ self.assertTrue(export_backup['backup_service'].startswith(
+ 'cinder.backup.drivers'))
+ self.assertIsNotNone(export_backup['backup_url'])
+
+ # Import Backup
+ import_backup = self.backups_adm_client.import_backup(
+ backup_service=export_backup['backup_service'],
+ backup_url=export_backup['backup_url'])
+ self.addCleanup(self._delete_backup, import_backup['id'])
+ self.assertIn("id", import_backup)
+ self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+ 'available')
+
+ # Verify Import Backup
+ backups = self.backups_adm_client.list_backups(detail=True)
+ self.assertIn(import_backup['id'], [b['id'] for b in backups])
+
+ # Restore backup
+ restore = self.backups_adm_client.restore_backup(import_backup['id'])
+ self.addCleanup(self.admin_volume_client.delete_volume,
+ restore['volume_id'])
+ self.assertEqual(import_backup['id'], restore['backup_id'])
+ self.admin_volume_client.wait_for_volume_status(restore['volume_id'],
+ 'available')
+
+ # Verify if restored volume is there in volume list
+ volumes = self.admin_volume_client.list_volumes()
+ self.assertIn(restore['volume_id'], [v['id'] for v in volumes])
+ self.backups_adm_client.wait_for_backup_status(import_backup['id'],
+ 'available')
+
class VolumesBackupsV1Test(VolumesBackupsV2Test):
_api_version = 1
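Condensed, the export/import flow exercised by the new test is the sequence below; it is only a summary of the calls already shown above, with cleanups and the restore step trimmed:

def export_and_import_backup(backups_client, backup_id):
    # Export returns the metadata record describing the backup.
    record = backups_client.export_backup(backup_id)
    # Import re-registers a backup from that record and returns its new id.
    imported = backups_client.import_backup(
        backup_service=record['backup_service'],
        backup_url=record['backup_url'])
    backups_client.wait_for_backup_status(imported['id'], 'available')
    return imported['id']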
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 1de20d6..dcdf7c5 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -1,12 +1,12 @@
#!/usr/bin/env python
-# Copyright 2014 Dell Inc.
+# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -153,9 +153,8 @@
for snap in snaps:
try:
client.delete_snapshot(snap['id'])
- except Exception as e:
- LOG.exception("Delete Snapshot exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Snapshot exception.")
def dry_run(self):
snaps = self.list()
@@ -180,9 +179,8 @@
for server in servers:
try:
client.delete_server(server['id'])
- except Exception as e:
- LOG.exception("Delete Server exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Server exception.")
def dry_run(self):
servers = self.list()
@@ -203,9 +201,8 @@
for sg in sgs:
try:
client.delete_server_group(sg['id'])
- except Exception as e:
- LOG.exception("Delete Server Group exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Server Group exception.")
def dry_run(self):
sgs = self.list()
@@ -229,9 +226,8 @@
for stack in stacks:
try:
client.delete_stack(stack['id'])
- except Exception as e:
- LOG.exception("Delete Stack exception: %s " % e)
- pass
+ except Exception:
+ LOG.exception("Delete Stack exception.")
def dry_run(self):
stacks = self.list()
@@ -256,9 +252,8 @@
try:
name = k['keypair']['name']
client.delete_keypair(name)
- except Exception as e:
- LOG.exception("Delete Keypairs exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Keypairs exception.")
def dry_run(self):
keypairs = self.list()
@@ -283,8 +278,8 @@
for g in secgrp_del:
try:
client.delete_security_group(g['id'])
- except Exception as e:
- LOG.exception("Delete Security Groups exception: %s" % e)
+ except Exception:
+ LOG.exception("Delete Security Groups exception.")
def dry_run(self):
secgrp_del = self.list()
@@ -308,9 +303,8 @@
for f in floating_ips:
try:
client.delete_floating_ip(f['id'])
- except Exception as e:
- LOG.exception("Delete Floating IPs exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Floating IPs exception.")
def dry_run(self):
floating_ips = self.list()
@@ -334,9 +328,8 @@
for v in vols:
try:
client.delete_volume(v['id'])
- except Exception as e:
- LOG.exception("Delete Volume exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Volume exception.")
def dry_run(self):
vols = self.list()
@@ -352,9 +345,8 @@
client = self.client
try:
client.delete_quota_set(self.tenant_id)
- except Exception as e:
- LOG.exception("Delete Volume Quotas exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Volume Quotas exception.")
def dry_run(self):
quotas = self.client.show_quota_usage(self.tenant_id)
@@ -371,9 +363,8 @@
client = self.client
try:
client.delete_quota_set(self.tenant_id)
- except Exception as e:
- LOG.exception("Delete Quotas exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Quotas exception.")
def dry_run(self):
client = self.limits_client
@@ -411,9 +402,8 @@
for n in networks:
try:
client.delete_network(n['id'])
- except Exception as e:
- LOG.exception("Delete Network exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Network exception.")
def dry_run(self):
networks = self.list()
@@ -436,9 +426,8 @@
for flip in flips:
try:
client.delete_floatingip(flip['id'])
- except Exception as e:
- LOG.exception("Delete Network Floating IP exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Network Floating IP exception.")
def dry_run(self):
flips = self.list()
@@ -471,9 +460,8 @@
subid = port['fixed_ips'][0]['subnet_id']
client.remove_router_interface_with_subnet_id(rid, subid)
client.delete_router(rid)
- except Exception as e:
- LOG.exception("Delete Router exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Router exception.")
def dry_run(self):
routers = self.list()
@@ -496,9 +484,8 @@
for hm in hms:
try:
client.delete_health_monitor(hm['id'])
- except Exception as e:
- LOG.exception("Delete Health Monitor exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Health Monitor exception.")
def dry_run(self):
hms = self.list()
@@ -521,9 +508,8 @@
for member in members:
try:
client.delete_member(member['id'])
- except Exception as e:
- LOG.exception("Delete Member exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Member exception.")
def dry_run(self):
members = self.list()
@@ -546,9 +532,8 @@
for vip in vips:
try:
client.delete_vip(vip['id'])
- except Exception as e:
- LOG.exception("Delete VIP exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete VIP exception.")
def dry_run(self):
vips = self.list()
@@ -571,9 +556,8 @@
for pool in pools:
try:
client.delete_pool(pool['id'])
- except Exception as e:
- LOG.exception("Delete Pool exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Pool exception.")
def dry_run(self):
pools = self.list()
@@ -596,9 +580,8 @@
for rule in rules:
try:
client.delete_metering_label_rule(rule['id'])
- except Exception as e:
- LOG.exception("Delete Metering Label Rule exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Metering Label Rule exception.")
def dry_run(self):
rules = self.list()
@@ -621,9 +604,8 @@
for label in labels:
try:
client.delete_metering_label(label['id'])
- except Exception as e:
- LOG.exception("Delete Metering Label exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Metering Label exception.")
def dry_run(self):
labels = self.list()
@@ -648,9 +630,8 @@
for port in ports:
try:
client.delete_port(port['id'])
- except Exception as e:
- LOG.exception("Delete Port exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Port exception.")
def dry_run(self):
ports = self.list()
@@ -675,9 +656,8 @@
for subnet in subnets:
try:
client.delete_subnet(subnet['id'])
- except Exception as e:
- LOG.exception("Delete Subnet exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Subnet exception.")
def dry_run(self):
subnets = self.list()
@@ -702,9 +682,8 @@
for alarm in alarms:
try:
client.delete_alarm(alarm['id'])
- except Exception as e:
- LOG.exception("Delete Alarms exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Alarms exception.")
def dry_run(self):
alarms = self.list()
@@ -737,9 +716,8 @@
for flavor in flavors:
try:
client.delete_flavor(flavor['id'])
- except Exception as e:
- LOG.exception("Delete Flavor exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Flavor exception.")
def dry_run(self):
flavors = self.list()
@@ -775,9 +753,8 @@
for image in images:
try:
client.delete_image(image['id'])
- except Exception as e:
- LOG.exception("Delete Image exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Image exception.")
def dry_run(self):
images = self.list()
@@ -823,9 +800,8 @@
for user in users:
try:
client.delete_user(user['id'])
- except Exception as e:
- LOG.exception("Delete User exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete User exception.")
def dry_run(self):
users = self.list()
@@ -852,8 +828,8 @@
and role['name'] != CONF.identity.admin_role)]
LOG.debug("List count, %s Roles after reconcile" % len(roles))
return roles
- except Exception as ex:
- LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
+ except Exception:
+ LOG.exception("Cannot retrieve Roles.")
return []
def delete(self):
@@ -862,9 +838,8 @@
for role in roles:
try:
client.delete_role(role['id'])
- except Exception as e:
- LOG.exception("Delete Role exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Role exception.")
def dry_run(self):
roles = self.list()
@@ -900,9 +875,8 @@
for tenant in tenants:
try:
client.delete_tenant(tenant['id'])
- except Exception as e:
- LOG.exception("Delete Tenant exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Tenant exception.")
def dry_run(self):
tenants = self.list()
@@ -938,9 +912,8 @@
try:
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
- except Exception as e:
- LOG.exception("Delete Domain exception: %s" % e)
- pass
+ except Exception:
+ LOG.exception("Delete Domain exception.")
def dry_run(self):
domains = self.list()
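The pattern repeated throughout this file relies on LOG.exception() attaching the traceback of the exception currently being handled, which makes the manual interpolation and the trailing pass redundant. Each delete loop now reduces to the shape below; delete() stands in for the per-service delete call:

def _delete_all(client, resources, log):
    for res in resources:
        try:
            client.delete(res['id'])
        except Exception:
            # LOG.exception() logs the message plus the active traceback,
            # so the exception object does not need to be formatted in.
            log.exception("Delete exception.")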
diff --git a/tempest/config.py b/tempest/config.py
index 7382088..5ea4d10 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -22,6 +22,8 @@
from oslo_log import log as logging
+from tempest.test_discover import plugins
+
# TODO(marun) Replace use of oslo_config's global ConfigOpts
# (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once
@@ -388,6 +390,14 @@
'encrypted volume to a running server instance? This may '
'depend on the combination of compute_driver in nova and '
'the volume_driver(s) in cinder.'),
+ # TODO(mriedem): Remove allow_duplicate_networks once kilo-eol happens
+ # since the option was removed from nova in Liberty and is the default
+ # behavior starting in Liberty.
+ cfg.BoolOpt('allow_duplicate_networks',
+ default=False,
+ help='Does the test environment support creating instances '
+ 'with multiple ports on the same network? This is only '
+ 'valid when using Neutron.'),
]
@@ -1184,8 +1194,12 @@
def register_opts():
+ ext_plugins = plugins.TempestTestPluginManager()
+ # Register in-tree tempest config options
for g, o in _opts:
register_opt_group(_CONF, g, o)
+ # Call external plugin config option registration
+ ext_plugins.register_plugin_opts(_CONF)
def list_opts():
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 02d1171..f5f4a61 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -64,12 +64,12 @@
return computes[0]['host_name']
def _add_host(self, aggregate_id, host):
- aggregate = self.aggregates_client.add_host(aggregate_id, host)
+ aggregate = self.aggregates_client.add_host(aggregate_id, host=host)
self.addCleanup(self._remove_host, aggregate['id'], host)
self.assertIn(host, aggregate['hosts'])
def _remove_host(self, aggregate_id, host):
- aggregate = self.aggregates_client.remove_host(aggregate_id, host)
+ aggregate = self.aggregates_client.remove_host(aggregate_id, host=host)
self.assertNotIn(host, aggregate['hosts'])
def _check_aggregate_details(self, aggregate, aggregate_name, azone,
@@ -85,7 +85,7 @@
def _set_aggregate_metadata(self, aggregate, meta):
aggregate = self.aggregates_client.set_metadata(aggregate['id'],
- meta)
+ metadata=meta)
for key, value in meta.items():
self.assertEqual(meta[key], aggregate['metadata'][key])
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 28d4ff5..4114b8b 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -45,13 +45,9 @@
self.validate_response(schema.create_aggregate, resp, body)
return service_client.ResponseBody(resp, body['aggregate'])
- def update_aggregate(self, aggregate_id, name, availability_zone=None):
+ def update_aggregate(self, aggregate_id, **kwargs):
"""Update a aggregate."""
- put_body = {
- 'name': name,
- 'availability_zone': availability_zone
- }
- put_body = json.dumps({'aggregate': put_body})
+ put_body = json.dumps({'aggregate': kwargs})
resp, body = self.put('os-aggregates/%s' % aggregate_id, put_body)
body = json.loads(body)
@@ -76,36 +72,27 @@
"""Returns the primary type of resource this client works with."""
return 'aggregate'
- def add_host(self, aggregate_id, host):
+ def add_host(self, aggregate_id, **kwargs):
"""Adds a host to the given aggregate."""
- post_body = {
- 'host': host,
- }
- post_body = json.dumps({'add_host': post_body})
+ post_body = json.dumps({'add_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return service_client.ResponseBody(resp, body['aggregate'])
- def remove_host(self, aggregate_id, host):
+ def remove_host(self, aggregate_id, **kwargs):
"""Removes a host from the given aggregate."""
- post_body = {
- 'host': host,
- }
- post_body = json.dumps({'remove_host': post_body})
+ post_body = json.dumps({'remove_host': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
self.validate_response(schema.aggregate_add_remove_host, resp, body)
return service_client.ResponseBody(resp, body['aggregate'])
- def set_metadata(self, aggregate_id, meta):
+ def set_metadata(self, aggregate_id, **kwargs):
"""Replaces the aggregate's existing metadata with new metadata."""
- post_body = {
- 'metadata': meta,
- }
- post_body = json.dumps({'set_metadata': post_body})
+ post_body = json.dumps({'set_metadata': kwargs})
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
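With the aggregates client switched to **kwargs, callers spell out the request fields by name, which is exactly what the test updates earlier in this change do. Representative calls; the ids and values are illustrative:

def exercise_aggregates_client(client, aggregate_id, host):
    # Every request field is now a keyword argument serialized as-is.
    client.update_aggregate(aggregate_id, name='agg-new',
                            availability_zone='az-new')
    client.add_host(aggregate_id, host=host)
    client.set_metadata(aggregate_id, metadata={'key': 'value'})
    client.remove_host(aggregate_id, host=host)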
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 65f3aa7..ce200d2 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -39,46 +39,12 @@
version = '2.0'
uri_prefix = "v2.0"
- def get_uri(self, plural_name):
- # get service prefix from resource name
-
- # the following map is used to construct proper URI
- # for the given neutron resource
- service_resource_prefix_map = {
- 'networks': '',
- 'subnets': '',
- 'ports': '',
- 'metering_labels': 'metering',
- 'metering_label_rules': 'metering',
- }
- service_prefix = service_resource_prefix_map.get(
- plural_name)
- plural_name = plural_name.replace("_", "-")
- if service_prefix:
- uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
- plural_name)
- else:
- uri = '%s/%s' % (self.uri_prefix, plural_name)
- return uri
-
- def pluralize(self, resource_name):
- # get plural from map or just add 's'
-
- # map from resource name to a plural name
- # needed only for those which can't be constructed as name + 's'
- resource_plural_map = {
- 'security_groups': 'security_groups',
- 'security_group_rules': 'security_group_rules',
- 'quotas': 'quotas',
- }
- return resource_plural_map.get(resource_name, resource_name + 's')
-
def _list_resources(self, uri, **filters):
req_uri = self.uri_prefix + uri
if filters:
req_uri += '?' + urllib.urlencode(filters, doseq=1)
resp, body = self.get(req_uri)
- body = self.deserialize_list(body)
+ body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
@@ -96,23 +62,23 @@
if fields:
req_uri += '?' + urllib.urlencode(fields, doseq=1)
resp, body = self.get(req_uri)
- body = self.deserialize_single(body)
+ body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
def _create_resource(self, uri, post_data):
req_uri = self.uri_prefix + uri
- req_post_data = self.serialize(post_data)
+ req_post_data = json.dumps(post_data)
resp, body = self.post(req_uri, req_post_data)
- body = self.deserialize_single(body)
+ body = json.loads(body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
def _update_resource(self, uri, post_data):
req_uri = self.uri_prefix + uri
- req_post_data = self.serialize(post_data)
+ req_post_data = json.dumps(post_data)
resp, body = self.put(req_uri, req_post_data)
- body = self.deserialize_single(body)
+ body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
@@ -285,34 +251,21 @@
uri = '/extensions'
return self._list_resources(uri, **filters)
- # Common methods that are hard to automate
def create_bulk_network(self, names):
network_list = [{'name': name} for name in names]
post_data = {'networks': network_list}
- body = self.serialize_list(post_data, "networks", "network")
- uri = self.get_uri("networks")
- resp, body = self.post(uri, body)
- body = self.deserialize_list(body)
- self.expected_success(201, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/networks'
+ return self._create_resource(uri, post_data)
def create_bulk_subnet(self, subnet_list):
post_data = {'subnets': subnet_list}
- body = self.serialize_list(post_data, 'subnets', 'subnet')
- uri = self.get_uri('subnets')
- resp, body = self.post(uri, body)
- body = self.deserialize_list(body)
- self.expected_success(201, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/subnets'
+ return self._create_resource(uri, post_data)
def create_bulk_port(self, port_list):
post_data = {'ports': port_list}
- body = self.serialize_list(post_data, 'ports', 'port')
- uri = self.get_uri('ports')
- resp, body = self.post(uri, body)
- body = self.deserialize_list(body)
- self.expected_success(201, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/ports'
+ return self._create_resource(uri, post_data)
def wait_for_resource_deletion(self, resource_type, id):
"""Waits for a resource to be deleted."""
@@ -371,32 +324,14 @@
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
- def deserialize_single(self, body):
- return json.loads(body)
-
- def deserialize_list(self, body):
- return json.loads(body)
-
- def serialize(self, data):
- return json.dumps(data)
-
- def serialize_list(self, data, root=None, item=None):
- return self.serialize(data)
-
def update_quotas(self, tenant_id, **kwargs):
put_body = {'quota': kwargs}
- body = json.dumps(put_body)
- uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
- resp, body = self.put(uri, body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body['quota'])
+ uri = '/quotas/%s' % tenant_id
+ return self._update_resource(uri, put_body)
def reset_quotas(self, tenant_id):
- uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/quotas/%s' % tenant_id
+ return self._delete_resource(uri)
def show_quotas(self, tenant_id, **fields):
uri = '/quotas/%s' % tenant_id
@@ -410,18 +345,12 @@
post_body = {'router': kwargs}
post_body['router']['name'] = name
post_body['router']['admin_state_up'] = admin_state_up
- body = json.dumps(post_body)
- uri = '%s/routers' % (self.uri_prefix)
- resp, body = self.post(uri, body)
- self.expected_success(201, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/routers'
+ return self._create_resource(uri, post_body)
def _update_router(self, router_id, set_enable_snat, **kwargs):
- uri = '%s/routers/%s' % (self.uri_prefix, router_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
+ uri = '/routers/%s' % router_id
+ body = self._show_resource(uri)
update_body = {}
update_body['name'] = kwargs.get('name', body['router']['name'])
update_body['admin_state_up'] = kwargs.get(
@@ -440,11 +369,7 @@
if 'distributed' in kwargs:
update_body['distributed'] = kwargs['distributed']
update_body = dict(router=update_body)
- update_body = json.dumps(update_body)
- resp, body = self.put(uri, update_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, update_body)
def update_router(self, router_id, **kwargs):
"""Update a router leaving enable_snat to its default value."""
@@ -476,64 +401,37 @@
return self._update_router(router_id, set_enable_snat=True, **kwargs)
def add_router_interface_with_subnet_id(self, router_id, subnet_id):
- uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
- router_id)
+ uri = '/routers/%s/add_router_interface' % router_id
update_body = {"subnet_id": subnet_id}
- update_body = json.dumps(update_body)
- resp, body = self.put(uri, update_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, update_body)
def add_router_interface_with_port_id(self, router_id, port_id):
- uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
- router_id)
+ uri = '/routers/%s/add_router_interface' % router_id
update_body = {"port_id": port_id}
- update_body = json.dumps(update_body)
- resp, body = self.put(uri, update_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, update_body)
def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
- uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
- router_id)
+ uri = '/routers/%s/remove_router_interface' % router_id
update_body = {"subnet_id": subnet_id}
- update_body = json.dumps(update_body)
- resp, body = self.put(uri, update_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, update_body)
def remove_router_interface_with_port_id(self, router_id, port_id):
- uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
- router_id)
+ uri = '/routers/%s/remove_router_interface' % router_id
update_body = {"port_id": port_id}
- update_body = json.dumps(update_body)
- resp, body = self.put(uri, update_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, update_body)
def list_router_interfaces(self, uuid):
- uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/ports?device_id=%s' % uuid
+ return self._list_resources(uri)
def update_agent(self, agent_id, agent_info):
"""
:param agent_info: Agent update information.
E.g {"admin_state_up": True}
"""
- uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
+ uri = '/agents/%s' % agent_id
agent = {"agent": agent_info}
- body = json.dumps(agent)
- resp, body = self.put(uri, body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, agent)
def show_agent(self, agent_id, **fields):
uri = '/agents/%s' % agent_id
@@ -544,88 +442,54 @@
return self._list_resources(uri, **filters)
def list_routers_on_l3_agent(self, agent_id):
- uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/agents/%s/l3-routers' % agent_id
+ return self._list_resources(uri)
def list_l3_agents_hosting_router(self, router_id):
- uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/routers/%s/l3-agents' % router_id
+ return self._list_resources(uri)
def add_router_to_l3_agent(self, agent_id, router_id):
- uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
+ uri = '/agents/%s/l3-routers' % agent_id
post_body = {"router_id": router_id}
- body = json.dumps(post_body)
- resp, body = self.post(uri, body)
- self.expected_success(201, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._create_resource(uri, post_body)
def remove_router_from_l3_agent(self, agent_id, router_id):
- uri = '%s/agents/%s/l3-routers/%s' % (
- self.uri_prefix, agent_id, router_id)
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
+ return self._delete_resource(uri)
def list_dhcp_agent_hosting_network(self, network_id):
- uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/networks/%s/dhcp-agents' % network_id
+ return self._list_resources(uri)
def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
- uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/agents/%s/dhcp-networks' % agent_id
+ return self._list_resources(uri)
def remove_network_from_dhcp_agent(self, agent_id, network_id):
- uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
- network_id)
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
+ uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
+ network_id)
+ return self._delete_resource(uri)
def update_extra_routes(self, router_id, routes):
- uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ uri = '/routers/%s' % router_id
put_body = {
'router': {
'routes': routes
}
}
- body = json.dumps(put_body)
- resp, body = self.put(uri, body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, put_body)
def delete_extra_routes(self, router_id):
- uri = '%s/routers/%s' % (self.uri_prefix, router_id)
- null_routes = None
+ uri = '/routers/%s' % router_id
put_body = {
'router': {
- 'routes': null_routes
+ 'routes': None
}
}
- body = json.dumps(put_body)
- resp, body = self.put(uri, body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ return self._update_resource(uri, put_body)
def add_dhcp_agent_to_network(self, agent_id, network_id):
post_body = {'network_id': network_id}
- body = json.dumps(post_body)
- uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
- resp, body = self.post(uri, body)
- self.expected_success(201, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
+ uri = '/agents/%s/dhcp-networks' % agent_id
+ return self._create_resource(uri, post_body)
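Several of the rewritten methods above (reset_quotas, remove_router_from_l3_agent, remove_network_from_dhcp_agent) call self._delete_resource(), which is not part of this hunk. Presumably it wraps DELETE the same way _update_resource() wraps PUT; a sketch of the assumed shape, not code taken from the change:

def _delete_resource(self, uri):
    # Hypothetical helper mirroring _update_resource(): issue the DELETE,
    # check the status, and return a ResponseBody.
    req_uri = self.uri_prefix + uri
    resp, body = self.delete(req_uri)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)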
diff --git a/tempest/services/volume/json/backups_client.py b/tempest/services/volume/json/backups_client.py
index 0f83b8d..8d34230 100644
--- a/tempest/services/volume/json/backups_client.py
+++ b/tempest/services/volume/json/backups_client.py
@@ -17,6 +17,8 @@
from oslo_serialization import jsonutils as json
+from tempest_lib import exceptions as lib_exc
+
from tempest.common import service_client
from tempest import exceptions
@@ -75,6 +77,24 @@
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, body['backups'])
+ def export_backup(self, backup_id):
+ """Export backup metadata record."""
+ url = "backups/%s/export_record" % backup_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return service_client.ResponseBody(resp, body['backup-record'])
+
+ def import_backup(self, backup_service, backup_url):
+ """Import backup metadata record."""
+ post_body = {'backup_service': backup_service,
+ 'backup_url': backup_url}
+ post_body = json.dumps({'backup-record': post_body})
+ resp, body = self.post("backups/import_record", post_body)
+ body = json.loads(body)
+ self.expected_success(201, resp.status)
+ return service_client.ResponseBody(resp, body['backup'])
+
def wait_for_backup_status(self, backup_id, status):
"""Waits for a Backup to reach a given status."""
body = self.show_backup(backup_id)
@@ -95,6 +115,18 @@
self.build_timeout))
raise exceptions.TimeoutException(message)
+ def wait_for_backup_deletion(self, backup_id):
+ """Waits for backup deletion"""
+ start_time = int(time.time())
+ while True:
+ try:
+ self.show_backup(backup_id)
+ except lib_exc.NotFound:
+ return
+ if int(time.time()) - start_time >= self.build_timeout:
+ raise exceptions.TimeoutException
+ time.sleep(self.build_interval)
+
class BackupsClient(BaseBackupsClient):
"""Volume V1 Backups client"""
diff --git a/tempest/test.py b/tempest/test.py
index 0e60041..df6b30d 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -44,7 +44,7 @@
def attr(**kwargs):
- """A decorator which applies the testtools attr decorator
+ """A decorator which applies the testtools attr decorator
This decorator applies testtools.testcase.attr if the given attribute is in
the list of attributes we want to pass through to testtools.
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 2701f02..45cd609 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -40,6 +40,17 @@
"""
return
+ @abc.abstractmethod
+ def register_opts(self, conf):
+ """Method to add additional configuration options to tempest. This
+ method will be run for the plugin during the register_opts() function
+ in tempest.config.
+
+ :param ConfigOpts conf: The conf object that can be used to register
+ additional options on.
+ """
+ return
+
@misc.singleton
class TempestTestPluginManager(object):
@@ -64,3 +75,7 @@
for plug in self.ext_plugins:
load_tests_dict[plug.name] = plug.obj.load_tests()
return load_tests_dict
+
+ def register_plugin_opts(self, conf):
+ for plug in self.ext_plugins:
+ plug.obj.register_opts(conf)
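A plugin implementing the new hook would register its options from register_opts(), roughly as in the sketch below. This assumes the abstract base class above is importable as plugins.TempestPlugin and uses oslo.config; the group and option names are made up:

from oslo_config import cfg

from tempest.test_discover import plugins


class MyServicePlugin(plugins.TempestPlugin):

    def load_tests(self):
        # Placeholder; a real plugin returns its test location as described
        # in the load_tests() docstring.
        return None

    def register_opts(self, conf):
        # Called from tempest.config.register_opts() for every enabled plugin.
        group = cfg.OptGroup(name='my_service', title='My service options')
        conf.register_group(group)
        conf.register_opt(cfg.BoolOpt('feature_enabled', default=False),
                          group='my_service')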
diff --git a/tempest/tests/services/compute/test_agents_client.py b/tempest/tests/services/compute/test_agents_client.py
index e8ea525..d268a18 100644
--- a/tempest/tests/services/compute/test_agents_client.py
+++ b/tempest/tests/services/compute/test_agents_client.py
@@ -32,10 +32,9 @@
'compute', 'regionOne')
def _test_list_agents(self, bytes_body=False):
+ body = '{"agents": []}'
if bytes_body:
- body = bytes(b'{"agents": []}')
- else:
- body = '{"agents": []}'
+ body = bytes(body.encode('utf-8'))
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
diff --git a/test-requirements.txt b/test-requirements.txt
index 8fcf071..65e3531 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,7 +7,6 @@
python-subunit>=0.0.18
oslosphinx>=2.5.0 # Apache-2.0
mox>=0.5.3
-mock>=1.1;python_version!='2.6'
-mock==1.0.1;python_version=='2.6'
+mock>=1.2
coverage>=3.6
oslotest>=1.7.0 # Apache-2.0