Merge "Make output from check_logs less verbose"
diff --git a/etc/accounts.yaml.sample b/etc/accounts.yaml.sample
index d191769..54fdcad 100644
--- a/etc/accounts.yaml.sample
+++ b/etc/accounts.yaml.sample
@@ -1,3 +1,7 @@
+# The number of accounts required can be estimated as CONCURRENCY x 2
+# Valid fields for credentials are defined in the descendants of
+# auth.Credentials - see KeystoneV[2|3]Credentials.CONF_ATTRIBUTES
+
- username: 'user_1'
tenant_name: 'test_tenant_1'
password: 'test_password'
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index ef56ab3..3b0b834 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -747,10 +747,10 @@
# The cidr block to allocate tenant ipv6 subnets from (string
# value)
-#tenant_network_v6_cidr=2003::/64
+#tenant_network_v6_cidr=2003::/48
# The mask bits for tenant ipv6 subnets (integer value)
-#tenant_network_v6_mask_bits=96
+#tenant_network_v6_mask_bits=64
# Whether tenant network connectivity should be evaluated
# directly (boolean value)
@@ -843,6 +843,15 @@
# expected to be enabled (list value)
#discoverable_apis=all
+# Execute (old style) container-sync tests (boolean value)
+#container_sync=true
+
+# Execute object-versioning tests (boolean value)
+#object_versioning=true
+
+# Execute discoverability tests (boolean value)
+#discoverability=true
+
[orchestration]
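
The three object-storage feature flags added above (container_sync, object_versioning, discoverability) are consumed later in this change by skipping tests when the corresponding function is disabled. As a rough, illustrative sketch of that pattern (the class and test names here are placeholders, not part of this change):

```python
import testtools

from tempest import config
from tempest import test

CONF = config.CONF


class ObjectVersioningExample(test.BaseTestCase):
    """Illustrative only: guard a test on a deployer-controlled flag."""

    @testtools.skipIf(
        not CONF.object_storage_feature_enabled.object_versioning,
        'Object-versioning is disabled')
    def test_versioned_container(self):
        # Runs only on deployments that enable object versioning.
        pass
```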
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index ab6aed3..b9b9b55 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -106,3 +106,17 @@
body = self.client.get_node_supported_boot_devices(self.node['uuid'])
self.assertIn('supported_boot_devices', body)
self.assertTrue(isinstance(body['supported_boot_devices'], list))
+
+ @test.attr(type='smoke')
+ def test_get_console(self):
+ _, body = self.client.get_console(self.node['uuid'])
+ con_info = ['console_enabled', 'console_info']
+ for key in con_info:
+ self.assertIn(key, body)
+
+ @test.attr(type='smoke')
+ def test_set_console_mode(self):
+ self.client.set_console_mode(self.node['uuid'], True)
+
+ _, body = self.client.get_console(self.node['uuid'])
+ self.assertEqual(True, body['console_enabled'])
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index d27d78b..93ed7ae 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -57,9 +57,9 @@
resp, quota_set = self.adm_client.get_default_quota_set(
self.demo_tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(sorted(expected_quota_set),
- sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.demo_tenant_id)
+ for quota in expected_quota_set:
+ self.assertIn(quota, quota_set.keys())
@test.attr(type='gate')
def test_update_all_quota_resources_for_tenant(self):
@@ -79,10 +79,18 @@
**new_quota_set)
default_quota_set.pop('id')
+ # NOTE(PhilDay) The following is safe as we're not updating these
+ # two quota values yet. Once the Nova change to add these is merged
+        # and the client is updated to support them, this can be removed.
+ if 'server_groups' in default_quota_set:
+ default_quota_set.pop('server_groups')
+ if 'server_group_members' in default_quota_set:
+ default_quota_set.pop('server_group_members')
self.addCleanup(self.adm_client.update_quota_set,
self.demo_tenant_id, **default_quota_set)
self.assertEqual(200, resp.status)
- self.assertEqual(new_quota_set, quota_set)
+ for quota in new_quota_set:
+ self.assertIn(quota, quota_set.keys())
# TODO(afazekas): merge these test cases
@test.attr(type='gate')
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 4afda03..0b29dde 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -44,7 +44,6 @@
    # TODO(afazekas): Add dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
@@ -58,9 +57,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
@@ -74,9 +73,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
@@ -89,7 +88,8 @@
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
@test.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
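
Several hunks in this change relax the expected failure from exceptions.Unauthorized alone to either Unauthorized or OverLimit. That works because assertRaises accepts a tuple of exception classes and passes when any one of them is raised; a minimal, self-contained illustration (the exception names here are made up for the example):

```python
import unittest


class Forbidden(Exception):
    pass


class OverQuota(Exception):
    pass


class TupleAssertRaisesExample(unittest.TestCase):

    def test_either_exception_is_accepted(self):
        def boom():
            raise OverQuota()

        # Passes because OverQuota is one of the listed exception types.
        self.assertRaises((Forbidden, OverQuota), boom)


if __name__ == '__main__':
    unittest.main()
```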
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index f4d010e..4d17557 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -54,7 +54,6 @@
flavor_id = data_utils.rand_int_id(start=1000)
return flavor_id
- @test.skip_because(bug="1298131")
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
@@ -70,12 +69,11 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.Unauthorized,
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
- @test.skip_because(bug="1298131")
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative', 'gate'])
@@ -91,7 +89,7 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.Unauthorized,
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 187c0d4..1c5d4a3 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -47,6 +47,7 @@
self.__class__.server_id = self.rebuild_server(self.server_id)
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ImagesOneServerTestJSON, cls).setUpClass()
cls.client = cls.images_client
@@ -59,12 +60,8 @@
% cls.__name__)
raise cls.skipException(skip_msg)
- try:
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- except Exception:
- cls.tearDownClass()
- raise
+ resp, server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = server['id']
def _get_default_flavor_disk_size(self, flavor_id):
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
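
The try/except blocks removed from setUpClass above are replaced by the test.safe_setup decorator. A minimal sketch of what such a decorator can look like, assuming (as the removed code suggests) that its job is to run tearDownClass and re-raise when class-level setup fails; Tempest's real implementation may handle logging and skip exceptions differently:

```python
import functools


def safe_setup(f):
    """Wrap a setUpClass-style function so that any failure during class
    setup triggers tearDownClass before the original error propagates.
    """
    @functools.wraps(f)
    def wrapper(cls):
        try:
            f(cls)
        except Exception:
            # Clean up whatever the partially-run setup created,
            # then let the original failure propagate.
            cls.tearDownClass()
            raise
    return wrapper
```

Stacked below @classmethod, as in the hunks above, the wrapper receives the class object and reproduces the behaviour of the deleted try/except blocks.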
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 4e84e08..51d9b85 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -55,6 +55,7 @@
self.__class__.server_id = self.rebuild_server(self.server_id)
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ImagesOneServerNegativeTestJSON, cls).setUpClass()
cls.client = cls.images_client
@@ -67,12 +68,8 @@
% cls.__name__)
raise cls.skipException(skip_msg)
- try:
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- except Exception:
- cls.tearDownClass()
- raise
+ resp, server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = server['id']
cls.image_ids = []
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 68794b1..9f1cfc8 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -32,6 +32,7 @@
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListImageFiltersTestJSON, cls).setUpClass()
if not CONF.service_available.glance:
@@ -69,33 +70,28 @@
return
# Create instances and snapshots via nova
- try:
- resp, cls.server1 = cls.create_test_server()
- resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
- # NOTE(sdague) this is faster than doing the sync wait_util on both
- cls.servers_client.wait_for_server_status(cls.server1['id'],
- 'ACTIVE')
+ resp, cls.server1 = cls.create_test_server()
+ resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
+        # NOTE(sdague) this is faster than doing the sync wait_until on both
+ cls.servers_client.wait_for_server_status(cls.server1['id'],
+ 'ACTIVE')
- # Create images to be used in the filter tests
- resp, cls.snapshot1 = cls.create_image_from_server(
- cls.server1['id'], wait_until='ACTIVE')
- cls.snapshot1_id = cls.snapshot1['id']
+ # Create images to be used in the filter tests
+ resp, cls.snapshot1 = cls.create_image_from_server(
+ cls.server1['id'], wait_until='ACTIVE')
+ cls.snapshot1_id = cls.snapshot1['id']
- # Servers have a hidden property for when they are being imaged
- # Performing back-to-back create image calls on a single
- # server will sometimes cause failures
- resp, cls.snapshot3 = cls.create_image_from_server(
- cls.server2['id'], wait_until='ACTIVE')
- cls.snapshot3_id = cls.snapshot3['id']
+ # Servers have a hidden property for when they are being imaged
+ # Performing back-to-back create image calls on a single
+ # server will sometimes cause failures
+ resp, cls.snapshot3 = cls.create_image_from_server(
+ cls.server2['id'], wait_until='ACTIVE')
+ cls.snapshot3_id = cls.snapshot3['id']
- # Wait for the server to be active after the image upload
- resp, cls.snapshot2 = cls.create_image_from_server(
- cls.server1['id'], wait_until='ACTIVE')
- cls.snapshot2_id = cls.snapshot2['id']
- except Exception:
- LOG.exception('setUpClass failed')
- cls.tearDownClass()
- raise
+ # Wait for the server to be active after the image upload
+ resp, cls.snapshot2 = cls.create_image_from_server(
+ cls.server1['id'], wait_until='ACTIVE')
+ cls.snapshot2_id = cls.snapshot2['id']
@test.attr(type='gate')
def test_list_images_filter_by_status(self):
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index eeff3ce..d7eb7ad 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -45,17 +45,17 @@
expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(sorted(expected_quota_set),
- sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
+ for quota in expected_quota_set:
+ self.assertIn(quota, quota_set.keys())
# get the quota set using user id
resp, quota_set = self.client.get_quota_set(self.tenant_id,
self.user_id)
self.assertEqual(200, resp.status)
- self.assertEqual(sorted(expected_quota_set),
- sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
+ for quota in expected_quota_set:
+ self.assertIn(quota, quota_set.keys())
@test.attr(type='smoke')
def test_get_default_quotas(self):
@@ -63,9 +63,9 @@
expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(sorted(expected_quota_set),
- sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
+ for quota in expected_quota_set:
+ self.assertIn(quota, quota_set.keys())
@test.attr(type='smoke')
def test_compare_tenant_quotas_with_default_quotas(self):
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index 7739f09..e115e7b 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -34,7 +34,6 @@
    # TODO(afazekas): Add dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
@@ -48,9 +47,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
@@ -64,7 +63,8 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
@@ -73,7 +73,6 @@
self.demo_tenant_id,
ram=0)
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
@@ -86,4 +85,5 @@
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
- self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
+ self.create_test_server)
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index 5eb6395..070dc2b 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -54,7 +54,6 @@
flavor_id = data_utils.rand_int_id(start=1000)
return flavor_id
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
flavor_name = data_utils.rand_name("flavor-")
@@ -68,12 +67,11 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.Unauthorized,
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
- @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
flavor_name = data_utils.rand_name("flavor-")
@@ -87,7 +85,7 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.Unauthorized,
+ self.assertRaises((exceptions.Unauthorized, exceptions.OverLimit),
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 795437b..edf91a7 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -47,6 +47,7 @@
super(ImagesOneServerV3Test, self).tearDown()
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ImagesOneServerV3Test, cls).setUpClass()
cls.client = cls.images_client
@@ -54,12 +55,8 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- try:
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- except Exception:
- cls.tearDownClass()
- raise
+ resp, server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = server['id']
def _get_default_flavor_disk_size(self, flavor_id):
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index eed81c6..544a5a5 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -55,6 +55,7 @@
self.__class__.server_id = self.rebuild_server(self.server_id)
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ImagesOneServerNegativeV3Test, cls).setUpClass()
cls.client = cls.images_client
@@ -62,12 +63,8 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- try:
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
- except Exception:
- cls.tearDownClass()
- raise
+ resp, server = cls.create_test_server(wait_until='ACTIVE')
+ cls.server_id = server['id']
cls.image_ids = []
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index b68c84a..6e0f431 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -25,7 +25,6 @@
"""Base test case class for all Database API tests."""
_interface = 'json'
- force_tenant_isolation = False
@classmethod
def setUpClass(cls):
diff --git a/tempest/api/identity/admin/test_tokens.py b/tempest/api/identity/admin/test_tokens.py
index e1db008..2c5fb74 100644
--- a/tempest/api/identity/admin/test_tokens.py
+++ b/tempest/api/identity/admin/test_tokens.py
@@ -35,10 +35,9 @@
tenant['id'], '')
self.data.users.append(user)
# then get a token for the user
- rsp, body = self.token_client.auth(user_name,
- user_password,
- tenant['name'])
- self.assertEqual(rsp['status'], '200')
+ _, body = self.token_client.auth(user_name,
+ user_password,
+ tenant['name'])
self.assertEqual(body['token']['tenant']['name'],
tenant['name'])
# Perform GET Token
@@ -89,15 +88,13 @@
role['id'])
# Get an unscoped token.
- resp, body = self.token_client.auth(user_name, user_password)
- self.assertEqual(200, resp.status)
+ _, body = self.token_client.auth(user_name, user_password)
token_id = body['token']['id']
# Use the unscoped token to get a token scoped to tenant1
- resp, body = self.token_client.auth_token(token_id,
- tenant=tenant1_name)
- self.assertEqual(200, resp.status)
+ _, body = self.token_client.auth_token(token_id,
+ tenant=tenant1_name)
scoped_token_id = body['token']['id']
@@ -105,9 +102,8 @@
self.client.delete_token(scoped_token_id)
# Use the unscoped token to get a token scoped to tenant2
- resp, body = self.token_client.auth_token(token_id,
- tenant=tenant2_name)
- self.assertEqual(200, resp.status)
+ _, body = self.token_client.auth_token(token_id,
+ tenant=tenant2_name)
class TokensTestXML(TokensTestJSON):
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 5838da3..d3ac6dd 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -97,10 +97,9 @@
self.token_client.auth(self.data.test_user, self.data.test_password,
self.data.test_tenant)
# Re-auth
- resp, body = self.token_client.auth(self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
- self.assertEqual('200', resp['status'])
+ self.token_client.auth(self.data.test_user,
+ self.data.test_password,
+ self.data.test_tenant)
@test.attr(type='gate')
def test_authentication_request_without_token(self):
@@ -113,10 +112,9 @@
# Delete the token from database
self.client.delete_token(token)
# Re-auth
- resp, body = self.token_client.auth(self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
- self.assertEqual('200', resp['status'])
+ self.token_client.auth(self.data.test_user,
+ self.data.test_password,
+ self.data.test_tenant)
self.client.auth_provider.clear_auth()
@test.attr(type='smoke')
@@ -205,9 +203,8 @@
# Validate the updated password
# Get a token
- resp, body = self.token_client.auth(self.data.test_user, new_pass,
- self.data.test_tenant)
- self.assertEqual('200', resp['status'])
+ _, body = self.token_client.auth(self.data.test_user, new_pass,
+ self.data.test_tenant)
self.assertTrue('id' in body['token'])
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index d40e0f3..433eaed 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -50,18 +50,16 @@
super(CredentialsTestJSON, cls).tearDownClass()
def _delete_credential(self, cred_id):
- resp, body = self.creds_client.delete_credential(cred_id)
- self.assertEqual(resp['status'], '204')
+ self.creds_client.delete_credential(cred_id)
@test.attr(type='smoke')
def test_credentials_create_get_update_delete(self):
keys = [data_utils.rand_name('Access-'),
data_utils.rand_name('Secret-')]
- resp, cred = self.creds_client.create_credential(
+ _, cred = self.creds_client.create_credential(
keys[0], keys[1], self.user_body['id'],
self.projects[0])
self.addCleanup(self._delete_credential, cred['id'])
- self.assertEqual(resp['status'], '201')
for value1 in self.creds_list[0]:
self.assertIn(value1, cred)
for value2 in self.creds_list[1]:
@@ -69,18 +67,16 @@
new_keys = [data_utils.rand_name('NewAccess-'),
data_utils.rand_name('NewSecret-')]
- resp, update_body = self.creds_client.update_credential(
+ _, update_body = self.creds_client.update_credential(
cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
project_id=self.projects[1])
- self.assertEqual(resp['status'], '200')
self.assertEqual(cred['id'], update_body['id'])
self.assertEqual(self.projects[1], update_body['project_id'])
self.assertEqual(self.user_body['id'], update_body['user_id'])
self.assertEqual(update_body['blob']['access'], new_keys[0])
self.assertEqual(update_body['blob']['secret'], new_keys[1])
- resp, get_body = self.creds_client.get_credential(cred['id'])
- self.assertEqual(resp['status'], '200')
+ _, get_body = self.creds_client.get_credential(cred['id'])
for value1 in self.creds_list[0]:
self.assertEqual(update_body[value1],
get_body[value1])
@@ -94,16 +90,14 @@
fetched_cred_ids = list()
for i in range(2):
- resp, cred = self.creds_client.create_credential(
+ _, cred = self.creds_client.create_credential(
data_utils.rand_name('Access-'),
data_utils.rand_name('Secret-'),
self.user_body['id'], self.projects[0])
- self.assertEqual(resp['status'], '201')
created_cred_ids.append(cred['id'])
self.addCleanup(self._delete_credential, cred['id'])
- resp, creds = self.creds_client.list_credentials()
- self.assertEqual(resp['status'], '200')
+ _, creds = self.creds_client.list_credentials()
for i in creds:
fetched_cred_ids.append(i['id'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 6beb8f2..ad46af2 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -57,9 +57,8 @@
@test.attr(type='gate')
def test_list_endpoints(self):
# Get a list of endpoints
- resp, fetched_endpoints = self.client.list_endpoints()
+ _, fetched_endpoints = self.client.list_endpoints()
# Asserting LIST endpoints
- self.assertEqual(resp['status'], '200')
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
self.assertEqual(0, len(missing_endpoints),
@@ -71,11 +70,10 @@
region = data_utils.rand_name('region')
url = data_utils.rand_url()
interface = 'public'
- resp, endpoint =\
+ _, endpoint =\
self.client.create_endpoint(self.service_id, interface, url,
region=region, enabled=True)
# Asserting Create Endpoint response body
- self.assertEqual(resp['status'], '201')
self.assertIn('id', endpoint)
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
@@ -84,8 +82,7 @@
fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
self.assertIn(endpoint['id'], fetched_endpoints_id)
# Deleting the endpoint created in this method
- resp, body = self.client.delete_endpoint(endpoint['id'])
- self.assertEqual(resp['status'], '204')
+ _, body = self.client.delete_endpoint(endpoint['id'])
self.assertEqual(body, '')
# Checking whether endpoint is deleted successfully
resp, fetched_endpoints = self.client.list_endpoints()
@@ -116,12 +113,11 @@
region2 = data_utils.rand_name('region')
url2 = data_utils.rand_url()
interface2 = 'internal'
- resp, endpoint = \
+ _, endpoint = \
self.client.update_endpoint(endpoint_for_update['id'],
service_id=service2['id'],
interface=interface2, url=url2,
region=region2, enabled=False)
- self.assertEqual(resp['status'], '200')
# Asserting if the attributes of endpoint are updated
self.assertEqual(service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 0e79440..65c5230 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -22,8 +22,7 @@
_interface = 'json'
def _delete_policy(self, policy_id):
- resp, _ = self.policy_client.delete_policy(policy_id)
- self.assertEqual(204, resp.status)
+ self.policy_client.delete_policy(policy_id)
@test.attr(type='smoke')
def test_list_policies(self):
@@ -39,8 +38,7 @@
self.addCleanup(self._delete_policy, policy['id'])
policy_ids.append(policy['id'])
# List and Verify Policies
- resp, body = self.policy_client.list_policies()
- self.assertEqual(resp['status'], '200')
+ _, body = self.policy_client.list_policies()
for p in body:
fetched_ids.append(p['id'])
missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -51,7 +49,7 @@
# Test to update policy
blob = data_utils.rand_name('BlobName-')
policy_type = data_utils.rand_name('PolicyType-')
- resp, policy = self.policy_client.create_policy(blob, policy_type)
+ _, policy = self.policy_client.create_policy(blob, policy_type)
self.addCleanup(self._delete_policy, policy['id'])
self.assertIn('id', policy)
self.assertIn('type', policy)
@@ -59,15 +57,13 @@
self.assertIsNotNone(policy['id'])
self.assertEqual(blob, policy['blob'])
self.assertEqual(policy_type, policy['type'])
- resp, fetched_policy = self.policy_client.get_policy(policy['id'])
- self.assertEqual(resp['status'], '200')
# Update policy
update_type = data_utils.rand_name('UpdatedPolicyType-')
- resp, data = self.policy_client.update_policy(
+ _, data = self.policy_client.update_policy(
policy['id'], type=update_type)
self.assertIn('type', data)
# Assertion for updated value with fetched value
- resp, fetched_policy = self.policy_client.get_policy(policy['id'])
+ _, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
self.assertIn('type', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index c8b034f..8fc0e22 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -40,34 +40,30 @@
super(RegionsTestJSON, cls).tearDownClass()
def _delete_region(self, region_id):
- resp, _ = self.client.delete_region(region_id)
- self.assertEqual(204, resp.status)
+ self.client.delete_region(region_id)
self.assertRaises(exceptions.NotFound,
self.client.get_region, region_id)
@test.attr(type='gate')
def test_create_update_get_delete_region(self):
r_description = data_utils.rand_name('description-')
- resp, region = self.client.create_region(
+ _, region = self.client.create_region(
r_description, parent_region_id=self.setup_regions[0]['id'])
- self.assertEqual(201, resp.status)
self.addCleanup(self._delete_region, region['id'])
self.assertEqual(r_description, region['description'])
self.assertEqual(self.setup_regions[0]['id'],
region['parent_region_id'])
# Update region with new description and parent ID
r_alt_description = data_utils.rand_name('description-')
- resp, region = self.client.update_region(
+ _, region = self.client.update_region(
region['id'],
description=r_alt_description,
parent_region_id=self.setup_regions[1]['id'])
- self.assertEqual(200, resp.status)
self.assertEqual(r_alt_description, region['description'])
self.assertEqual(self.setup_regions[1]['id'],
region['parent_region_id'])
# Get the details of region
- resp, region = self.client.get_region(region['id'])
- self.assertEqual(200, resp.status)
+ _, region = self.client.get_region(region['id'])
self.assertEqual(r_alt_description, region['description'])
self.assertEqual(self.setup_regions[1]['id'],
region['parent_region_id'])
@@ -77,19 +73,17 @@
# Create a region with a specific id
r_region_id = data_utils.rand_uuid()
r_description = data_utils.rand_name('description-')
- resp, region = self.client.create_region(
+ _, region = self.client.create_region(
r_description, unique_region_id=r_region_id)
self.addCleanup(self._delete_region, region['id'])
# Asserting Create Region with specific id response body
- self.assertEqual(201, resp.status)
self.assertEqual(r_region_id, region['id'])
self.assertEqual(r_description, region['description'])
@test.attr(type='gate')
def test_list_regions(self):
# Get a list of regions
- resp, fetched_regions = self.client.list_regions()
- self.assertEqual(200, resp.status)
+ _, fetched_regions = self.client.list_regions()
missing_regions =\
[e for e in self.setup_regions if e not in fetched_regions]
# Asserting List Regions response
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 2e732fe..1f7cf48 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -141,11 +141,10 @@
self.client.add_group_user(self.group_body['id'], self.user_body['id'])
self.addCleanup(self.client.delete_group_user,
self.group_body['id'], self.user_body['id'])
- resp, body = self.token.auth(self.user_body['id'], self.u_password,
- self.project['name'],
- domain=self.domain['name'])
+ _, body = self.token.auth(self.user_body['id'], self.u_password,
+ self.project['name'],
+ domain=self.domain['name'])
roles = body['token']['roles']
- self.assertEqual(resp['status'], '201')
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0]['id'], self.role['id'])
# Revoke role to group on project
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index f6078da..7e21cc3 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -13,41 +13,84 @@
# License for the specific language governing permissions and limitations
# under the License.
-
from tempest.api.identity import base
from tempest.common.utils import data_utils
+from tempest import exceptions
from tempest import test
class ServicesTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
- @test.attr(type='gate')
- def test_update_service(self):
- # Update description attribute of service
- name = data_utils.rand_name('service-')
- serv_type = data_utils.rand_name('type--')
- desc = data_utils.rand_name('description-')
- _, body = self.service_client.create_service(name, serv_type,
- description=desc)
- # Deleting the service created in this method
- self.addCleanup(self.service_client.delete_service, body['id'])
+ def _del_service(self, service_id):
+ # Used for deleting the services created in this class
+ self.service_client.delete_service(service_id)
+ # Checking whether service is deleted successfully
+ self.assertRaises(exceptions.NotFound, self.service_client.get_service,
+ service_id)
- s_id = body['id']
- resp1_desc = body['description']
+ @test.attr(type='smoke')
+ def test_create_update_get_service(self):
+ # Creating a Service
+ name = data_utils.rand_name('service')
+ serv_type = data_utils.rand_name('type')
+ desc = data_utils.rand_name('description')
+ _, create_service = self.service_client.create_service(
+ serv_type, name=name, description=desc)
+ self.addCleanup(self._del_service, create_service['id'])
+ self.assertIsNotNone(create_service['id'])
- s_desc2 = data_utils.rand_name('desc2-')
- _, body = self.service_client.update_service(
+ # Verifying response body of create service
+ expected_data = {'name': name, 'type': serv_type, 'description': desc}
+ self.assertDictContainsSubset(expected_data, create_service)
+
+ # Update description
+ s_id = create_service['id']
+ resp1_desc = create_service['description']
+ s_desc2 = data_utils.rand_name('desc2')
+ _, update_service = self.service_client.update_service(
s_id, description=s_desc2)
- resp2_desc = body['description']
+ resp2_desc = update_service['description']
+
self.assertNotEqual(resp1_desc, resp2_desc)
# Get service
- _, body = self.service_client.get_service(s_id)
- resp3_desc = body['description']
+ _, fetched_service = self.service_client.get_service(s_id)
+ resp3_desc = fetched_service['description']
- self.assertNotEqual(resp1_desc, resp3_desc)
self.assertEqual(resp2_desc, resp3_desc)
+ self.assertDictContainsSubset(update_service, fetched_service)
+
+ @test.attr(type='smoke')
+ def test_create_service_without_description(self):
+ # Create a service only with name and type
+ name = data_utils.rand_name('service')
+ serv_type = data_utils.rand_name('type')
+ _, service = self.service_client.create_service(
+ serv_type, name=name)
+ self.addCleanup(self.service_client.delete_service, service['id'])
+ self.assertIn('id', service)
+ expected_data = {'name': name, 'type': serv_type}
+ self.assertDictContainsSubset(expected_data, service)
+
+ @test.attr(type='smoke')
+ def test_list_services(self):
+ # Create, List, Verify and Delete Services
+ service_ids = list()
+ for _ in range(3):
+ name = data_utils.rand_name('service')
+ serv_type = data_utils.rand_name('type')
+ _, create_service = self.service_client.create_service(
+ serv_type, name=name)
+ self.addCleanup(self.service_client.delete_service,
+ create_service['id'])
+ service_ids.append(create_service['id'])
+
+ # List and Verify Services
+ _, services = self.service_client.list_services()
+ fetched_ids = [service['id'] for service in services]
+ found = [s for s in fetched_ids if s in service_ids]
+ self.assertEqual(len(found), len(service_ids))
class ServicesTestXML(ServicesTestJSON):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index bd08614..230e09f 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -35,8 +35,7 @@
email=u_email)
self.addCleanup(self.client.delete_user, user['id'])
# Perform Authentication
- resp, body = self.token.auth(user['id'], u_password)
- self.assertEqual(201, resp.status)
+ resp, _ = self.token.auth(user['id'], u_password)
subject_token = resp['x-subject-token']
# Perform GET Token
_, token_details = self.client.get_token(subject_token)
@@ -48,7 +47,6 @@
self.assertRaises(exceptions.NotFound, self.client.get_token,
subject_token)
- @test.skip_because(bug="1351026")
@test.attr(type='gate')
def test_rescope_token(self):
"""Rescope a token.
@@ -89,7 +87,6 @@
# Get an unscoped token.
resp, token_auth = self.token.auth(user=user['id'],
password=user_password)
- self.assertEqual(201, resp.status)
token_id = resp['x-subject-token']
orig_expires_at = token_auth['token']['expires_at']
@@ -114,7 +111,6 @@
tenant=project1_name,
domain='Default')
token1_id = resp['x-subject-token']
- self.assertEqual(201, resp.status)
self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
'Expiration time should match original token')
@@ -141,10 +137,9 @@
self.client.delete_token(token1_id)
# Now get another scoped token using the unscoped token.
- resp, token_auth = self.token.auth(token=token_id,
- tenant=project2_name,
- domain='Default')
- self.assertEqual(201, resp.status)
+ _, token_auth = self.token.auth(token=token_id,
+ tenant=project2_name,
+ domain='Default')
self.assertEqual(project2['id'],
token_auth['token']['project']['id'])
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 3c25819..898bcd0 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -77,8 +77,7 @@
new_password = data_utils.rand_name('pass1')
self.client.update_user_password(user['id'], new_password,
original_password)
- resp, body = self.token.auth(user['id'], new_password)
- self.assertEqual(201, resp.status)
+ resp, _ = self.token.auth(user['id'], new_password)
subject_token = resp['x-subject-token']
# Perform GET Token to verify and confirm password is updated
_, token_details = self.client.get_token(subject_token)
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 6eec79e..9300c5e 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -72,15 +72,18 @@
self.client.wait_for_resource_deletion('firewall', fw_id)
- def _wait_for_active(self, fw_id):
+ def _wait_until_ready(self, fw_id):
+ target_states = ('ACTIVE', 'CREATED')
+
def _wait():
_, firewall = self.client.show_firewall(fw_id)
firewall = firewall['firewall']
- return firewall['status'] == 'ACTIVE'
+ return firewall['status'] in target_states
if not test.call_until_true(_wait, CONF.network.build_timeout,
CONF.network.build_interval):
- m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+ m = ("Timed out waiting for firewall %s to reach %s state(s)" %
+ (fw_id, target_states))
raise exceptions.TimeoutException(m)
@test.attr(type='smoke')
@@ -190,7 +193,8 @@
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
- self._wait_for_active(firewall_id)
+ # Wait for the firewall resource to become ready
+ self._wait_until_ready(firewall_id)
# show a created firewall
_, firewall = self.client.show_firewall(firewall_id)
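
The renamed _wait_until_ready helper above polls show_firewall via test.call_until_true until the firewall reaches ACTIVE or CREATED, or the build timeout expires. A rough sketch of that kind of polling primitive, assuming it simply retries a boolean callable at a fixed interval (the real tempest.test helper may differ in detail):

```python
import time


def call_until_true(func, duration, sleep_for):
    """Call func repeatedly until it returns True or duration expires.

    Returns True as soon as func() is truthy, False on timeout.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False
```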
diff --git a/tempest/api/network/test_metering_extensions.py b/tempest/api/network/test_metering_extensions.py
index 5b8db43..0cc218b 100644
--- a/tempest/api/network/test_metering_extensions.py
+++ b/tempest/api/network/test_metering_extensions.py
@@ -35,6 +35,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(MeteringJSON, cls).setUpClass()
if not test.is_extension_enabled('metering', 'network'):
@@ -42,17 +43,12 @@
raise cls.skipException(msg)
description = "metering label created by tempest"
name = data_utils.rand_name("metering-label")
- try:
- cls.metering_label = cls.create_metering_label(name, description)
- remote_ip_prefix = "10.0.0.0/24"
- direction = "ingress"
- cls.metering_label_rule = cls.create_metering_label_rule(
- remote_ip_prefix, direction,
- metering_label_id=cls.metering_label['id'])
- except Exception:
- LOG.exception('setUpClass failed')
- cls.tearDownClass()
- raise
+ cls.metering_label = cls.create_metering_label(name, description)
+ remote_ip_prefix = "10.0.0.0/24"
+ direction = "ingress"
+ cls.metering_label_rule = cls.create_metering_label_rule(
+ remote_ip_prefix, direction,
+ metering_label_id=cls.metering_label['id'])
def _delete_metering_label(self, metering_label_id):
# Deletes a label and verifies if it is deleted or not
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index f06d17c..26f6b8f 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -16,6 +16,7 @@
import socket
from tempest.api.network import base
+from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
@@ -72,18 +73,12 @@
_, body = self.client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
- self.assertEqual(port['id'], self.port['id'])
- self.assertEqual(self.port['admin_state_up'], port['admin_state_up'])
- self.assertEqual(self.port['device_id'], port['device_id'])
- self.assertEqual(self.port['device_owner'], port['device_owner'])
- self.assertEqual(self.port['mac_address'], port['mac_address'])
- self.assertEqual(self.port['name'], port['name'])
- self.assertEqual(self.port['security_groups'],
- port['security_groups'])
- self.assertEqual(self.port['network_id'], port['network_id'])
- self.assertEqual(self.port['security_groups'],
- port['security_groups'])
- self.assertEqual(port['fixed_ips'], [])
+        # TODO(Santosh): This is a temporary workaround to compare create_port
+        # and show_port dict elements. Remove this once the extra_dhcp_opts
+        # issue is fixed in neutron (bug 1365341).
+ self.assertThat(self.port,
+ custom_matchers.MatchesDictExceptForKeys
+ (port, excluded_keys=['extra_dhcp_opts']))
@test.attr(type='smoke')
def test_show_port_fields(self):
@@ -134,6 +129,7 @@
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
+ @test.skip_because(bug="1364166")
@test.attr(type='smoke')
def test_update_port_with_second_ip(self):
# Create a network with two subnets
@@ -249,10 +245,10 @@
_tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(PortsIpV6TestJSON, cls).setUpClass()
if not CONF.network_feature_enabled.ipv6:
- cls.tearDownClass()
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
@@ -274,6 +270,5 @@
super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
-class PortsAdminExtendedAttrsIpV6TestXML(
- PortsAdminExtendedAttrsIpV6TestJSON):
+class PortsAdminExtendedAttrsIpV6TestXML(PortsAdminExtendedAttrsIpV6TestJSON):
_interface = 'xml'
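
The show_port assertions above are collapsed into a single custom_matchers.MatchesDictExceptForKeys check. Conceptually the matcher compares two dicts while ignoring the excluded keys; a standalone approximation of that comparison (this is not Tempest's actual matcher class):

```python
def dicts_match_except(expected, actual, excluded_keys=()):
    """Return True if the dicts are equal once the excluded keys are
    dropped from both sides."""
    def strip(d):
        return {k: v for k, v in d.items() if k not in excluded_keys}
    return strip(expected) == strip(actual)


# Ports that differ only in 'extra_dhcp_opts' compare as equal:
assert dicts_match_except({'id': 'p1', 'extra_dhcp_opts': []},
                          {'id': 'p1'},
                          excluded_keys=['extra_dhcp_opts'])
```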
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index ccc0067..a143659 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -69,10 +69,11 @@
cls.object_client_alt.auth_provider.clear_auth()
cls.container_client_alt.auth_provider.clear_auth()
- cls.data = base.DataGenerator(cls.identity_admin_client)
+ cls.data = SwiftDataGenerator(cls.identity_admin_client)
@classmethod
def tearDownClass(cls):
+ cls.data.teardown_all()
cls.isolated_creds.clear_isolated_creds()
super(BaseObjectTest, cls).tearDownClass()
@@ -116,3 +117,28 @@
self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
target, method))
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+
+
+class SwiftDataGenerator(base.DataGenerator):
+
+ def setup_test_user(self, reseller=False):
+ super(SwiftDataGenerator, self).setup_test_user()
+ if reseller:
+ role_name = CONF.object_storage.reseller_admin_role
+ else:
+ role_name = CONF.object_storage.operator_role
+ role_id = self._get_role_id(role_name)
+ self._assign_role(role_id)
+
+ def _get_role_id(self, role_name):
+ try:
+ _, roles = self.client.list_roles()
+ return next(r['id'] for r in roles if r['name'] == role_name)
+ except StopIteration:
+ msg = "Role name '%s' is not found" % role_name
+ raise exceptions.NotFound(msg)
+
+ def _assign_role(self, role_id):
+ self.client.assign_user_role(self.tenant['id'],
+ self.user['id'],
+ role_id)
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 19e3068..c1eb897 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -18,7 +18,6 @@
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
-from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -33,32 +32,10 @@
cls.container_name = data_utils.rand_name(name="TestContainer")
cls.container_client.create_container(cls.container_name)
- cls.data.setup_test_user()
+ cls.data.setup_test_user(reseller=True)
cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
- # Retrieve the ResellerAdmin role id
- reseller_role_id = None
- try:
- _, roles = cls.os_admin.identity_client.list_roles()
- reseller_role_id = next(r['id'] for r in roles if r['name']
- == CONF.object_storage.reseller_admin_role)
- except StopIteration:
- msg = "No ResellerAdmin role found"
- raise exceptions.NotFound(msg)
-
- # Retrieve the ResellerAdmin user id
- reseller_user_id = cls.data.test_credentials.user_id
-
- # Retrieve the ResellerAdmin tenant id
- reseller_tenant_id = cls.data.test_credentials.tenant_id
-
- # Assign the newly created user the appropriate ResellerAdmin role
- cls.os_admin.identity_client.assign_user_role(
- reseller_tenant_id,
- reseller_user_id,
- reseller_role_id)
-
# Retrieve a ResellerAdmin auth data and use it to set a quota
# on the client's account
cls.reselleradmin_auth_data = \
@@ -97,7 +74,6 @@
def tearDownClass(cls):
if hasattr(cls, "container_name"):
cls.delete_containers([cls.container_name])
- cls.data.teardown_all()
super(AccountQuotasTest, cls).tearDownClass()
@test.attr(type="smoke")
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 6afd381..7324c2e 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -33,32 +33,10 @@
cls.container_name = data_utils.rand_name(name="TestContainer")
cls.container_client.create_container(cls.container_name)
- cls.data.setup_test_user()
+ cls.data.setup_test_user(reseller=True)
cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
- # Retrieve the ResellerAdmin role id
- reseller_role_id = None
- try:
- _, roles = cls.os_admin.identity_client.list_roles()
- reseller_role_id = next(r['id'] for r in roles if r['name']
- == CONF.object_storage.reseller_admin_role)
- except StopIteration:
- msg = "No ResellerAdmin role found"
- raise exceptions.NotFound(msg)
-
- # Retrieve the ResellerAdmin tenant id
- reseller_user_id = cls.data.test_credentials.user_id
-
- # Retrieve the ResellerAdmin tenant id
- reseller_tenant_id = cls.data.test_credentials.tenant_id
-
- # Assign the newly created user the appropriate ResellerAdmin role
- cls.os_admin.identity_client.assign_user_role(
- reseller_tenant_id,
- reseller_user_id,
- reseller_role_id)
-
# Retrieve a ResellerAdmin auth data and use it to set a quota
# on the client's account
cls.reselleradmin_auth_data = \
@@ -96,7 +74,6 @@
def tearDownClass(cls):
if hasattr(cls, "container_name"):
cls.delete_containers([cls.container_name])
- cls.data.teardown_all()
super(AccountQuotasNegativeTest, cls).tearDownClass()
@test.attr(type=["negative", "smoke"])
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index d615374..69cba1e 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -14,15 +14,14 @@
# under the License.
import random
-
from six import moves
+import testtools
from tempest.api.object_storage import base
from tempest import clients
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
-from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -45,7 +44,6 @@
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
- cls.data.teardown_all()
super(AccountTest, cls).tearDownClass()
@test.attr(type='smoke')
@@ -66,35 +64,7 @@
# the base user of this instance.
self.data.setup_test_user()
- os_test_user = clients.Manager(
- self.data.test_credentials)
-
- # Retrieve the id of an operator role of object storage
- test_role_id = None
- swift_role = CONF.object_storage.operator_role
- try:
- _, roles = self.os_admin.identity_client.list_roles()
- test_role_id = next(r['id'] for r in roles if r['name']
- == swift_role)
- except StopIteration:
- msg = "%s role found" % swift_role
- raise exceptions.NotFound(msg)
-
- # Retrieve the test_user id
- _, users = self.os_admin.identity_client.get_users()
- test_user_id = next(usr['id'] for usr in users if usr['name']
- == self.data.test_user)
-
- # Retrieve the test_tenant id
- _, tenants = self.os_admin.identity_client.list_tenants()
- test_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
- == self.data.test_tenant)
-
- # Assign the newly created user the appropriate operator role
- self.os_admin.identity_client.assign_user_role(
- test_tenant_id,
- test_user_id,
- test_role_id)
+ os_test_user = clients.Manager(self.data.test_credentials)
resp, container_list = \
os_test_user.account_client.list_account_containers()
@@ -148,6 +118,9 @@
self.assertEqual(container_list.find(".//bytes").tag, 'bytes')
@test.attr(type='smoke')
+ @testtools.skipIf(
+ not CONF.object_storage_feature_enabled.discoverability,
+ 'Discoverability function is disabled')
def test_list_extensions(self):
resp, extensions = self.account_client.list_extensions()
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index 490672d..e4c46e2 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -47,5 +47,3 @@
self.assertRaises(exceptions.Unauthorized,
self.custom_account_client.list_account_containers,
params=params)
- # delete the user which was created
- self.data.teardown_all()
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index fc51504..a7d45be 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -27,11 +27,6 @@
test_os = clients.Manager(cls.data.test_credentials)
cls.test_auth_data = test_os.auth_provider.auth_data
- @classmethod
- def tearDownClass(cls):
- cls.data.teardown_all()
- super(ObjectTestACLs, cls).tearDownClass()
-
def setUp(self):
super(ObjectTestACLs, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index ca53876..1a21ecc 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -29,11 +29,6 @@
test_os = clients.Manager(cls.data.test_credentials)
cls.test_auth_data = test_os.auth_provider.auth_data
- @classmethod
- def tearDownClass(cls):
- cls.data.teardown_all()
- super(ObjectACLsNegativeTest, cls).tearDownClass()
-
def setUp(self):
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 581c6d9..28bde24 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -48,7 +48,6 @@
def tearDownClass(cls):
if hasattr(cls, "container_name"):
cls.delete_containers([cls.container_name])
- cls.data.teardown_all()
super(StaticWebTest, cls).tearDownClass()
@test.requires_ext(extension='staticweb', service='object')
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 5f46d01..3e6d58c 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
import time
import urlparse
@@ -68,6 +69,9 @@
@test.attr(type='slow')
@test.skip_because(bug='1317133')
+ @testtools.skipIf(
+ not CONF.object_storage_feature_enabled.container_sync,
+ 'Old-style container sync function is disabled')
def test_container_synchronization(self):
# container to container synchronization
# to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index d1541b9..ad7e068 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -15,7 +15,6 @@
# under the License.
from tempest.api.object_storage import base
-from tempest import clients
from tempest.common import custom_matchers
from tempest import test
@@ -25,11 +24,6 @@
@classmethod
def setUpClass(cls):
super(CrossdomainTest, cls).setUpClass()
- # creates a test user. The test user will set its base_url to the Swift
- # endpoint and test the healthcheck feature.
- cls.data.setup_test_user()
-
- cls.os_test_user = clients.Manager(cls.data.test_credentials)
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
@@ -38,29 +32,16 @@
cls.xml_end = "</cross-domain-policy>"
- @classmethod
- def tearDownClass(cls):
- cls.data.teardown_all()
- super(CrossdomainTest, cls).tearDownClass()
-
def setUp(self):
super(CrossdomainTest, self).setUp()
- client = self.os_test_user.account_client
# Turning http://.../v1/foobar into http://.../
- client.skip_path()
-
- def tearDown(self):
- # clear the base_url for subsequent requests
- self.os_test_user.account_client.reset_path()
-
- super(CrossdomainTest, self).tearDown()
+ self.account_client.skip_path()
@test.attr('gate')
@test.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
- resp, body = self.os_test_user.account_client.get("crossdomain.xml",
- {})
+ resp, body = self.account_client.get("crossdomain.xml", {})
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertTrue(body.startswith(self.xml_start) and
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index dc5585e..a0fb708 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -59,7 +59,6 @@
def tearDownClass(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
cls.delete_containers(cls.containers)
- cls.data.teardown_all()
super(ObjectFormPostTest, cls).tearDownClass()
def get_multipart_form(self, expires=600):
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 878bf6d..103bc8e 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -59,7 +59,6 @@
def tearDownClass(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
cls.delete_containers(cls.containers)
- cls.data.teardown_all()
super(ObjectFormPostNegativeTest, cls).tearDownClass()
def get_multipart_form(self, expires=600):
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 0443a80..159ad5c 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -59,23 +59,23 @@
object_name_base_1 = object_name + '_01'
object_name_base_2 = object_name + '_02'
data_size = MIN_SEGMENT_SIZE
- self.data = data_utils.arbitrary_string(data_size)
+ self.content = data_utils.arbitrary_string(data_size)
self._create_object(self.container_name,
object_name_base_1,
- self.data)
+ self.content)
self._create_object(self.container_name,
object_name_base_2,
- self.data)
+ self.content)
path_object_1 = '/%s/%s' % (self.container_name,
object_name_base_1)
path_object_2 = '/%s/%s' % (self.container_name,
object_name_base_2)
data_manifest = [{'path': path_object_1,
- 'etag': hashlib.md5(self.data).hexdigest(),
+ 'etag': hashlib.md5(self.content).hexdigest(),
'size_bytes': data_size},
{'path': path_object_2,
- 'etag': hashlib.md5(self.data).hexdigest(),
+ 'etag': hashlib.md5(self.content).hexdigest(),
'size_bytes': data_size}]
return json.dumps(data_manifest)
@@ -147,7 +147,7 @@
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self._assertHeadersSLO(resp, 'GET')
- sum_data = self.data + self.data
+ sum_data = self.content + self.content
self.assertEqual(body, sum_data)
@test.attr(type='gate')
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index 264a18a..f5ebce7 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -59,8 +59,6 @@
cls.delete_containers(cls.containers)
- # delete the user setup created
- cls.data.teardown_all()
super(ObjectTempUrlTest, cls).tearDownClass()
def setUp(self):
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 7d26433..28173fe 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -53,8 +53,6 @@
cls.delete_containers(cls.containers)
- # delete the user setup created
- cls.data.teardown_all()
super(ObjectTempUrlNegativeTest, cls).tearDownClass()
def setUp(self):
@@ -69,10 +67,10 @@
# create object
self.object_name = data_utils.rand_name(name='ObjectTemp')
- self.data = data_utils.arbitrary_string(size=len(self.object_name),
- base_text=self.object_name)
+ self.content = data_utils.arbitrary_string(size=len(self.object_name),
+ base_text=self.object_name)
self.object_client.create_object(self.container_name,
- self.object_name, self.data)
+ self.object_name, self.content)
def _get_expiry_date(self, expiration_time=1000):
return int(time.time() + expiration_time)
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 8d2ff9b..971449d 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -13,10 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class ContainerTest(base.BaseObjectTest):
@classmethod
@@ -41,6 +46,9 @@
self.assertEqual(header_value, versioned)
@test.attr(type='smoke')
+ @testtools.skipIf(
+ not CONF.object_storage_feature_enabled.object_versioning,
+ 'Object-versioning is disabled')
def test_versioned_container(self):
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index cbe62a1..d7c2a0d 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -66,9 +66,9 @@
params = {'format': 'json'}
_, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEqual(2, len(container_list))
- for cont in container_list:
- self.assertTrue(cont['name'].startswith(self.stack_name))
+ created_containers = [cont for cont in container_list
+ if cont['name'].startswith(self.stack_name)]
+ self.assertEqual(2, len(created_containers))
@test.services('object_storage')
def test_acl(self):
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
index 791a19b..98761ac 100644
--- a/tempest/api/orchestration/stacks/test_update.py
+++ b/tempest/api/orchestration/stacks/test_update.py
@@ -61,7 +61,6 @@
self.list_resources(stack_identifier))
@test.attr(type='gate')
- @test.skip_because(bug='1308682')
def test_stack_update_add_remove(self):
stack_name = data_utils.rand_name('heat')
stack_identifier = self.create_stack(stack_name, self.template)
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index f3b1ad5..769f5e0 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -67,13 +67,13 @@
_, self.volume = self.volume_client.create_volume(
size=1, display_name=vol_name, volume_type=type_name)
- self.volume_client.wait_for_volume_status(
- self.volume['id'], 'available')
if with_prefix:
self.volume_id_list_with_prefix.append(self.volume['id'])
else:
self.volume_id_list_without_prefix.append(
self.volume['id'])
+ self.volume_client.wait_for_volume_status(
+ self.volume['id'], 'available')
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 3cd0827..43f48ff 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -148,11 +148,11 @@
_api_version = 1
-class BaseVolumeV1AdminTest(BaseVolumeV1Test):
+class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
@classmethod
def setUpClass(cls):
- super(BaseVolumeV1AdminTest, cls).setUpClass()
+ super(BaseVolumeAdminTest, cls).setUpClass()
cls.adm_user = CONF.identity.admin_username
cls.adm_pass = CONF.identity.admin_password
cls.adm_tenant = CONF.identity.admin_tenant_name
@@ -160,11 +160,62 @@
msg = ("Missing Volume Admin API credentials "
"in configuration.")
raise cls.skipException(msg)
+
if CONF.compute.allow_tenant_isolation:
cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_adm = clients.AdminManager(interface=cls._interface)
+
+ cls.qos_specs = []
+
cls.client = cls.os_adm.volume_types_client
cls.hosts_client = cls.os_adm.volume_hosts_client
cls.quotas_client = cls.os_adm.volume_quotas_client
+ cls.volume_types_client = cls.os_adm.volume_types_client
+
+ if cls._api_version == 1:
+ if not CONF.volume_feature_enabled.api_v1:
+ msg = "Volume API v1 is disabled"
+ raise cls.skipException(msg)
+ cls.volume_qos_client = cls.os_adm.volume_qos_client
+ elif cls._api_version == 2:
+ if not CONF.volume_feature_enabled.api_v2:
+ msg = "Volume API v2 is disabled"
+ raise cls.skipException(msg)
+ cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.clear_qos_specs()
+ super(BaseVolumeAdminTest, cls).tearDownClass()
+
+ @classmethod
+ def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
+ """create a test Qos-Specs."""
+ name = name or data_utils.rand_name(cls.__name__ + '-QoS')
+ consumer = consumer or 'front-end'
+ _, qos_specs = cls.volume_qos_client.create_qos(name, consumer,
+ **kwargs)
+ cls.qos_specs.append(qos_specs['id'])
+ return qos_specs
+
+ @classmethod
+ def clear_qos_specs(cls):
+ for qos_id in cls.qos_specs:
+ try:
+ cls.volume_qos_client.delete_qos(qos_id)
+ except exceptions.NotFound:
+ # The qos_specs may have already been deleted, which is OK.
+ pass
+
+ for qos_id in cls.qos_specs:
+ try:
+ cls.volume_qos_client.wait_for_resource_deletion(qos_id)
+ except exceptions.NotFound:
+ # The qos_specs may have already been deleted, which is OK.
+ pass
+
+
+class BaseVolumeV1AdminTest(BaseVolumeAdminTest):
+ _api_version = 1
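
A side note on the teardown shape above: clear_qos_specs issues every delete first and only afterwards waits on each deletion, so the per-resource waits can overlap instead of accumulating. A minimal standalone sketch of that two-pass pattern follows (the client object and its delete/wait_for_deletion methods are placeholders, not tempest API):

def clear_resources(client, resource_ids):
    # Pass 1: request deletion of everything, tolerating already-deleted
    # resources (the tempest code catches exceptions.NotFound here).
    for res_id in resource_ids:
        try:
            client.delete(res_id)
        except LookupError:
            pass
    # Pass 2: wait for each deletion to finish, again tolerating
    # already-deleted resources.
    for res_id in resource_ids:
        try:
            client.wait_for_deletion(res_id)
        except LookupError:
            pass
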
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
new file mode 100644
index 0000000..8b6ba49
--- /dev/null
+++ b/tempest/api/volume/test_qos.py
@@ -0,0 +1,176 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.common.utils import data_utils as utils
+from tempest import test
+
+
+class QosSpecsV2TestJSON(base.BaseVolumeAdminTest):
+ """Test the Cinder QoS-specs.
+
+ Tests for create, list, delete, show, associate,
+ disassociate, set/unset key V2 APIs.
+ """
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(QosSpecsV2TestJSON, cls).setUpClass()
+ # Create a shared test QoS specs for use by the tests; the admin
+ # QoS client itself is set up by the volume admin base class.
+ cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
+ cls.qos_consumer = 'front-end'
+
+ cls.created_qos = cls.create_test_qos_specs(cls.qos_name,
+ cls.qos_consumer,
+ read_iops_sec='2000')
+
+ def _create_delete_test_qos_with_given_consumer(self, consumer):
+ name = utils.rand_name('qos')
+ qos = {'name': name, 'consumer': consumer}
+ body = self.create_test_qos_specs(name, consumer)
+ for key in ['name', 'consumer']:
+ self.assertEqual(qos[key], body[key])
+
+ self.volume_qos_client.delete_qos(body['id'])
+ self.volume_qos_client.wait_for_resource_deletion(body['id'])
+
+ # validate the deletion
+ _, list_qos = self.volume_qos_client.list_qos()
+ self.assertNotIn(body, list_qos)
+
+ def _create_test_volume_type(self):
+ vol_type_name = utils.rand_name("volume-type")
+ _, vol_type = self.volume_types_client.create_volume_type(
+ vol_type_name)
+ self.addCleanup(self.volume_types_client.delete_volume_type,
+ vol_type['id'])
+ return vol_type
+
+ def _test_associate_qos(self, vol_type_id):
+ self.volume_qos_client.associate_qos(
+ self.created_qos['id'], vol_type_id)
+
+ def _test_get_association_qos(self):
+ _, body = self.volume_qos_client.get_association_qos(
+ self.created_qos['id'])
+
+ associations = []
+ for association in body:
+ associations.append(association['id'])
+
+ return associations
+
+ def test_create_delete_qos_with_front_end_consumer(self):
+ """Tests the creation and deletion of QoS specs
+
+ With consumer as front end
+ """
+ self._create_delete_test_qos_with_given_consumer('front-end')
+
+ def test_create_delete_qos_with_back_end_consumer(self):
+ """Tests the creation and deletion of QoS specs
+
+ With consumer as back-end
+ """
+ self._create_delete_test_qos_with_given_consumer('back-end')
+
+ @test.attr(type='smoke')
+ def test_create_delete_qos_with_both_consumer(self):
+ """Tests the creation and deletion of QoS specs
+
+ With consumer as both front end and back end
+ """
+ self._create_delete_test_qos_with_given_consumer('both')
+
+ @test.attr(type='smoke')
+ def test_get_qos(self):
+ """Tests the detail of a given qos-specs"""
+ _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+ self.assertEqual(self.qos_name, body['name'])
+ self.assertEqual(self.qos_consumer, body['consumer'])
+
+ @test.attr(type='smoke')
+ def test_list_qos(self):
+ """Tests the list of all qos-specs"""
+ _, body = self.volume_qos_client.list_qos()
+ self.assertIn(self.created_qos, body)
+
+ @test.attr(type='smoke')
+ def test_set_unset_qos_key(self):
+ """Test the addition of a specs key to qos-specs"""
+ args = {'iops_bytes': '500'}
+ _, body = self.volume_qos_client.set_qos_key(self.created_qos['id'],
+ iops_bytes='500')
+ self.assertEqual(args, body)
+ _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+ self.assertEqual(args['iops_bytes'], body['specs']['iops_bytes'])
+
+ # test the deletion of a specs key from qos-specs
+ keys = ['iops_bytes']
+ self.volume_qos_client.unset_qos_key(self.created_qos['id'], keys)
+ operation = 'qos-key-unset'
+ self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+ operation, keys)
+ _, body = self.volume_qos_client.get_qos(self.created_qos['id'])
+ self.assertNotIn(keys[0], body['specs'])
+
+ @test.attr(type='smoke')
+ def test_associate_disassociate_qos(self):
+ """Test the following operations :
+
+ 1. associate_qos
+ 2. get_association_qos
+ 3. disassociate_qos
+ 4. disassociate_all_qos
+ """
+
+ # create a test volume-type
+ vol_type = []
+ for _ in range(0, 3):
+ vol_type.append(self._create_test_volume_type())
+
+ # associate the qos-specs with volume-types
+ for i in range(0, 3):
+ self._test_associate_qos(vol_type[i]['id'])
+
+ # get the association of the qos-specs
+ associations = self._test_get_association_qos()
+
+ for i in range(0, 3):
+ self.assertIn(vol_type[i]['id'], associations)
+
+ # disassociate a volume-type with qos-specs
+ self.volume_qos_client.disassociate_qos(
+ self.created_qos['id'], vol_type[0]['id'])
+ operation = 'disassociate'
+ self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+ operation,
+ vol_type[0]['id'])
+ associations = self._test_get_association_qos()
+ self.assertNotIn(vol_type[0]['id'], associations)
+
+ # disassociate all volume-types from qos-specs
+ self.volume_qos_client.disassociate_all_qos(
+ self.created_qos['id'])
+ operation = 'disassociate-all'
+ self.volume_qos_client.wait_for_qos_operations(self.created_qos['id'],
+ operation)
+ associations = self._test_get_association_qos()
+ self.assertEmpty(associations)
+
+
+class QosSpecsV1TestJSON(QosSpecsV2TestJSON):
+ _api_version = 1
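
The unset-key and disassociate checks above rely on the QoS client's wait_for_qos_operations helper to poll until the change becomes visible. As a rough illustration of that poll-until-true idea only (a standalone sketch; the timeout values and the check callable are illustrative, not the client's real implementation):

import time

def wait_until(check, timeout=60, interval=1):
    # Poll the supplied callable until it returns True or the timeout hits.
    start = int(time.time())
    while not check():
        if int(time.time()) - start >= timeout:
            raise RuntimeError('Timed out waiting for the QoS operation')
        time.sleep(interval)

# Example shape of a check used after unsetting a key (get_qos_specs is a
# hypothetical accessor standing in for the client call):
# wait_until(lambda: 'iops_bytes' not in get_qos_specs()['specs'])
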
diff --git a/tempest/api_schema/response/compute/v2/hypervisors.py b/tempest/api_schema/response/compute/v2/hypervisors.py
index 1878881..cbb7698 100644
--- a/tempest/api_schema/response/compute/v2/hypervisors.py
+++ b/tempest/api_schema/response/compute/v2/hypervisors.py
@@ -26,11 +26,7 @@
'items': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is integer,
- # but here allows 'string' also because we
- # will be able to change it to 'uuid' in
- # the future.
- 'id': {'type': ['integer', 'string']},
+ 'uuid': {'type': 'string'},
'name': {'type': 'string'}
}
}
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/compute/test_nova.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_nova.py
rename to tempest/cli/simple_read_only/compute/test_nova.py
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/compute/test_nova_manage.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_nova_manage.py
rename to tempest/cli/simple_read_only/compute/test_nova_manage.py
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/data_processing/test_sahara.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_sahara.py
rename to tempest/cli/simple_read_only/data_processing/test_sahara.py
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/identity/test_keystone.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_keystone.py
rename to tempest/cli/simple_read_only/identity/test_keystone.py
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/image/test_glance.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_glance.py
rename to tempest/cli/simple_read_only/image/test_glance.py
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_neutron.py
rename to tempest/cli/simple_read_only/network/test_neutron.py
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
similarity index 100%
rename from tempest/cli/simple_read_only/test_swift.py
rename to tempest/cli/simple_read_only/object_storage/test_swift.py
diff --git a/tempest/cli/simple_read_only/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
similarity index 98%
rename from tempest/cli/simple_read_only/test_heat.py
rename to tempest/cli/simple_read_only/orchestration/test_heat.py
index 8e413a9..019818b 100644
--- a/tempest/cli/simple_read_only/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -56,7 +56,7 @@
def test_heat_resource_template_fmt_arg_long_json(self):
ret = self.heat('resource-template --format json OS::Nova::Server')
- self.assertIn('"Type": "OS::Nova::Server",', ret)
+ self.assertIn('"Type": "OS::Nova::Server"', ret)
self.assertIsInstance(json.loads(ret), dict)
def test_heat_resource_type_list(self):
diff --git a/tempest/cli/simple_read_only/test_ceilometer.py b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
similarity index 93%
rename from tempest/cli/simple_read_only/test_ceilometer.py
rename to tempest/cli/simple_read_only/telemetry/test_ceilometer.py
index b622dd4..1d2822d 100644
--- a/tempest/cli/simple_read_only/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
@@ -39,19 +39,15 @@
raise cls.skipException(msg)
super(SimpleReadOnlyCeilometerClientTest, cls).setUpClass()
- @test.services('telemetry')
def test_ceilometer_meter_list(self):
self.ceilometer('meter-list')
@test.attr(type='slow')
- @test.services('telemetry')
def test_ceilometer_resource_list(self):
self.ceilometer('resource-list')
- @test.services('telemetry')
def test_ceilometermeter_alarm_list(self):
self.ceilometer('alarm-list')
- @test.services('telemetry')
def test_ceilometer_version(self):
self.ceilometer('', flags='--version')
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
similarity index 96%
rename from tempest/cli/simple_read_only/test_cinder.py
rename to tempest/cli/simple_read_only/volume/test_cinder.py
index 3a9a7a6..e44a577 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -121,8 +121,12 @@
self.assertTableStruct(zone_list, ['Name', 'Status'])
def test_cinder_endpoints(self):
- endpoints = self.parser.listing(self.cinder('endpoints'))
- self.assertTableStruct(endpoints, ['nova', 'Value'])
+ out = self.cinder('endpoints')
+ tables = self.parser.tables(out)
+ for table in tables:
+ headers = table['headers']
+ self.assertTrue(2 >= len(headers))
+ self.assertEqual('Value', headers[1])
def test_cinder_service_list(self):
service_list = self.parser.listing(self.cinder('service-list'))
diff --git a/tempest/clients.py b/tempest/clients.py
index 2b8b6fb..eab496e 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -180,12 +180,14 @@
from tempest.services.volume.json.backups_client import BackupsClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
+from tempest.services.volume.json.qos_client import QosSpecsClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
from tempest.services.volume.json.volumes_client import VolumesClientJSON
from tempest.services.volume.v2.json.availability_zone_client import \
VolumeV2AvailabilityZoneClientJSON
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
+from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
from tempest.services.volume.v2.xml.availability_zone_client import \
VolumeV2AvailabilityZoneClientXML
@@ -428,6 +430,13 @@
self.security_group_default_rules_client = (
SecurityGroupDefaultRulesClientJSON(self.auth_provider))
self.networks_client = NetworksClientJSON(self.auth_provider)
+ # NOTE: XML clients are not implemented for QoS specs yet, so the
+ # QoS clients are set here unconditionally. Once the XML clients
+ # exist, these assignments should move into their respective
+ # if/else branches.
+ # Bug : 1312553
+ self.volume_qos_client = QosSpecsClientJSON(self.auth_provider)
+ self.volume_qos_v2_client = QosSpecsV2ClientJSON(
+ self.auth_provider)
class AltManager(Manager):
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index f37bfdb..3f8db3d 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -21,7 +21,6 @@
import argparse
import datetime
-import logging
import os
import sys
import unittest
@@ -31,6 +30,7 @@
import tempest.auth
from tempest import config
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.openstack.common import timeutils
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
@@ -110,6 +110,13 @@
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+
+def destroy_tenants(tenants):
+ admin = keystone_admin()
+ for tenant in tenants:
+ tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
+ r, body = admin.identity.delete_tenant(tenant_id)
+
##############
#
# USERS
@@ -174,6 +181,13 @@
enabled=True)
+def destroy_users(users):
+ admin = keystone_admin()
+ for user in users:
+ user_id = admin.identity.get_user_by_name(user['name'])['id']
+ r, body = admin.identity.delete_user(user_id)
+
+
def collect_users(users):
global USERS
LOG.info("Collecting users")
@@ -343,6 +357,15 @@
obj['container'], obj['name'],
_file_contents(obj['file']))
+
+def destroy_objects(objects):
+ for obj in objects:
+ client = client_for_user(obj['owner'])
+ r, body = client.objects.delete_object(obj['container'], obj['name'])
+ if not (200 <= int(r['status']) < 299):
+ raise ValueError("unable to destroy object: [%s] %s" % (r, body))
+
+
#######################
#
# IMAGES
@@ -496,6 +519,13 @@
client.volumes.create_volume(volume['name'], volume['size'])
+def destroy_volumes(volumes):
+ for volume in volumes:
+ client = client_for_user(volume['owner'])
+ volume_id = _get_volume_by_name(client, volume['name'])['id']
+ r, body = client.volumes.delete_volume(volume_id)
+
+
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
@@ -531,18 +561,12 @@
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
-
- # Future
- # detach_volumes
- # destroy_volumes
-
destroy_servers(RES['servers'])
destroy_images(RES['images'])
- # destroy_objects
-
- # destroy_users
- # destroy_tenants
-
+ destroy_objects(RES['objects'])
+ destroy_volumes(RES['volumes'])
+ destroy_users(RES['users'])
+ destroy_tenants(RES['tenants'])
LOG.warn("Destroy mode incomplete")
@@ -592,21 +616,10 @@
config.CONF.set_config_path(OPTS.config_file)
-def setup_logging(debug=True):
+def setup_logging():
global LOG
+ logging.setup(__name__)
LOG = logging.getLogger(__name__)
- if debug:
- LOG.setLevel(logging.DEBUG)
- else:
- LOG.setLevel(logging.INFO)
-
- ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.DEBUG)
- formatter = logging.Formatter(
- datefmt='%Y-%m-%d %H:%M:%S',
- fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
- ch.setFormatter(formatter)
- LOG.addHandler(ch)
def main():
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index ad88ea2..c491169 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -52,8 +52,11 @@
hash_dict[temp_hash.hexdigest()] = account
return hash_dict
- def _create_hash_file(self, hash):
- path = os.path.join(os.path.join(self.accounts_dir, hash))
+ def is_multi_user(self):
+ return len(self.hash_dict) > 1
+
+ def _create_hash_file(self, hash_string):
+ path = os.path.join(os.path.join(self.accounts_dir, hash_string))
if not os.path.isfile(path):
open(path, 'w').close()
return True
@@ -66,20 +69,20 @@
# Create File from first hash (since none are in use)
self._create_hash_file(hashes[0])
return hashes[0]
- for hash in hashes:
- res = self._create_hash_file(hash)
+ for _hash in hashes:
+ res = self._create_hash_file(_hash)
if res:
- return hash
+ return _hash
msg = 'Insufficient number of users provided'
raise exceptions.InvalidConfiguration(msg)
def _get_creds(self):
- free_hash = self._get_free_hash(self.hashes.keys())
+ free_hash = self._get_free_hash(self.hash_dict.keys())
return self.hash_dict[free_hash]
@lockutils.synchronized('test_accounts_io', external=True)
- def remove_hash(self, hash):
- hash_path = os.path.join(self.accounts_dir, hash)
+ def remove_hash(self, hash_string):
+ hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
LOG.warning('Expected an account lock file %s to remove, but '
'one did not exist')
@@ -89,43 +92,79 @@
os.rmdir(self.accounts_dir)
def get_hash(self, creds):
- for hash in self.hash_dict:
- # NOTE(mtreinish) Assuming with v3 that username, tenant, password
- # is unique enough
- cred_dict = {
- 'username': creds.username,
- 'tenant_name': creds.tenant_name,
- 'password': creds.password
- }
- if self.hash_dict[hash] == cred_dict:
- return hash
+ for _hash in self.hash_dict:
+ # Comparing on the attributes that are expected in the YAML
+ if all([getattr(creds, k) == self.hash_dict[_hash][k] for k in
+ creds.CONF_ATTRIBUTES]):
+ return _hash
raise AttributeError('Invalid credentials %s' % creds)
def remove_credentials(self, creds):
- hash = self.get_hash(creds)
- self.remove_hash(hash, self.accounts_dir)
+ _hash = self.get_hash(creds)
+ self.remove_hash(_hash)
def get_primary_creds(self):
- if self.credentials.get('primary'):
- return self.credentials.get('primary')
+ if self.isolated_creds.get('primary'):
+ return self.isolated_creds.get('primary')
creds = self._get_creds()
primary_credential = auth.get_credentials(**creds)
- self.credentials['primary'] = primary_credential
+ self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
- if self.credentials.get('alt'):
- return self.credentials.get('alt')
+ if self.isolated_creds.get('alt'):
+ return self.isolated_creds.get('alt')
creds = self._get_creds()
alt_credential = auth.get_credentials(**creds)
- self.credentials['alt'] = alt_credential
+ self.isolated_creds['alt'] = alt_credential
return alt_credential
def clear_isolated_creds(self):
- for creds in self.credentials.values():
+ for creds in self.isolated_creds.values():
self.remove_credentials(creds)
def get_admin_creds(self):
msg = ('If admin credentials are available tenant_isolation should be'
' used instead')
raise NotImplementedError(msg)
+
+
+class NotLockingAccounts(Accounts):
+ """Credentials provider which always returns the first and second
+ configured accounts as primary and alt users.
+ This credential provider can be used for serial test execution
+ to preserve the current behaviour of serial tempest runs.
+ """
+
+ def get_creds(self, id):
+ try:
+ # No need to sort the dict: within the same Python process the
+ # hash seed won't change, so subsequent calls to keys() will
+ # return the same ordering
+ _hash = self.hash_dict.keys()[id]
+ except IndexError:
+ msg = 'Insufficient number of users provided'
+ raise exceptions.InvalidConfiguration(msg)
+ return self.hash_dict[_hash]
+
+ def get_primary_creds(self):
+ if self.isolated_creds.get('primary'):
+ return self.isolated_creds.get('primary')
+ creds = self.get_creds(0)
+ primary_credential = auth.get_credentials(**creds)
+ self.isolated_creds['primary'] = primary_credential
+ return primary_credential
+
+ def get_alt_creds(self):
+ if self.isolated_creds.get('alt'):
+ return self.isolated_creds.get('alt')
+ creds = self.get_creds(1)
+ alt_credential = auth.get_credentials(**creds)
+ self.isolated_creds['alt'] = alt_credential
+ return alt_credential
+
+ def clear_isolated_creds(self):
+ self.isolated_creds = {}
+
+ def get_admin_creds(self):
+ return auth.get_default_credentials("identity_admin", fill_in=False)
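
For context on the provider above: each configured account is keyed by a hash of its credential fields, and a worker claims an account by creating a lock file named after that hash, while NotLockingAccounts simply indexes the same dict for serial runs. A rough standalone sketch of the locking idea (the field names and the claim_account helper are illustrative, not the tempest API):

import hashlib
import os

def account_hash(account):
    # Stable identifier derived from the credential fields, mirroring the
    # way hash_dict is keyed above.
    md5 = hashlib.md5()
    for field in ('username', 'tenant_name', 'password'):
        md5.update(str(account.get(field, '')).encode('utf-8'))
    return md5.hexdigest()

def claim_account(accounts, lock_dir):
    # Claim the first account whose lock file does not exist yet; the lock
    # file marks the account as in use by some test worker.
    for account in accounts:
        path = os.path.join(lock_dir, account_hash(account))
        if not os.path.isfile(path):
            open(path, 'w').close()
            return account
    raise RuntimeError('Insufficient number of users provided')
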
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index 7348a7d..39e3a67 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -13,7 +13,6 @@
# under the License.
import re
-from unittest import util
from testtools import helpers
@@ -204,24 +203,35 @@
self.intersect = set(self.expected) & set(self.actual)
self.symmetric_diff = set(self.expected) ^ set(self.actual)
+ def _format_dict(self, dict_to_format):
+ # Ensure the error string dict is printed in a set order
+ # NOTE(mtreinish): needed to ensure a deterministic error msg for
+ # testing. Otherwise the error message will be dependent on the
+ # dict ordering.
+ dict_string = "{"
+ for key in sorted(dict_to_format):
+ dict_string += "'%s': %s, " % (key, dict_to_format[key])
+ dict_string = dict_string[:-2] + '}'
+ return dict_string
+
def describe(self):
msg = ""
if self.symmetric_diff:
only_expected = helpers.dict_subtract(self.expected, self.actual)
only_actual = helpers.dict_subtract(self.actual, self.expected)
if only_expected:
- msg += "Only in expected:\n %s\n" % \
- util.safe_repr(only_expected)
+ msg += "Only in expected:\n %s\n" % self._format_dict(
+ only_expected)
if only_actual:
- msg += "Only in actual:\n %s\n" % \
- util.safe_repr(only_actual)
+ msg += "Only in actual:\n %s\n" % self._format_dict(
+ only_actual)
diff_set = set(o for o in self.intersect if
self.expected[o] != self.actual[o])
if diff_set:
msg += "Differences:\n"
- for o in diff_set:
- msg += " %s: expected %s, actual %s\n" % (
- o, self.expected[o], self.actual[o])
+ for o in diff_set:
+ msg += " %s: expected %s, actual %s\n" % (
+ o, self.expected[o], self.actual[o])
return msg
def get_details(self):
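
To make the point of the sorted rendering concrete, here is the helper in isolation with one worked example (the header names and values are made up):

def _format_dict(dict_to_format):
    # Same logic as the matcher helper above: keys emitted in sorted order
    # so the mismatch message is deterministic.
    dict_string = "{"
    for key in sorted(dict_to_format):
        dict_string += "'%s': %s, " % (key, dict_to_format[key])
    dict_string = dict_string[:-2] + '}'
    return dict_string

print(_format_dict({'status': '200', 'content-length': '14'}))
# prints: {'content-length': 14, 'status': 200}
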
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index dca1f86..02c50e4 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -134,6 +134,8 @@
self.identity_admin_client.users.delete(user)
def _delete_tenant(self, tenant):
+ if CONF.service_available.neutron:
+ self._cleanup_default_secgroup(tenant)
if self.tempest_client:
self.identity_admin_client.delete_tenant(tenant)
else:
@@ -376,6 +378,22 @@
LOG.warn('network with name: %s not found for delete' %
network_name)
+ def _cleanup_default_secgroup(self, tenant):
+ net_client = self.network_admin_client
+ if self.tempest_client:
+ resp, resp_body = net_client.list_security_groups(tenant_id=tenant,
+ name="default")
+ else:
+ resp_body = net_client.list_security_groups(tenant_id=tenant,
+ name="default")
+ secgroups_to_delete = resp_body['security_groups']
+ for secgroup in secgroups_to_delete:
+ try:
+ net_client.delete_security_group(secgroup['id'])
+ except exceptions.NotFound:
+ LOG.warn('Security group %s, id %s not found for clean-up' %
+ (secgroup['name'], secgroup['id']))
+
def _clear_isolated_net_resources(self):
net_client = self.network_admin_client
for cred in self.isolated_net_resources:
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 132d0a6..e584cbf 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -552,7 +552,13 @@
if self.is_resource_deleted(id):
return
if int(time.time()) - start_time >= self.build_timeout:
- raise exceptions.TimeoutException
+ message = ('Failed to delete resource %(id)s within the '
+ 'required time (%(timeout)s s).' %
+ {'id': id, 'timeout': self.build_timeout})
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def is_resource_deleted(self, id):
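
The enriched timeout above yields a message along these lines (the caller and id values here are purely illustrative; find_test_caller may also return nothing, in which case the prefix is omitted):

message = ('Failed to delete resource %(id)s within the '
           'required time (%(timeout)s s).' %
           {'id': 'volume-1234', 'timeout': 196})
caller = 'VolumesV2DeleteTest:test_volume_delete'  # hypothetical caller
if caller:
    message = '(%s) %s' % (caller, message)
print(message)
# (VolumesV2DeleteTest:test_volume_delete) Failed to delete resource
# volume-1234 within the required time (196 s).
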
diff --git a/tempest/config.py b/tempest/config.py
index 93d4874..d3449a7 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -419,10 +419,10 @@
default=28,
help="The mask bits for tenant ipv4 subnets"),
cfg.StrOpt('tenant_network_v6_cidr',
- default="2003::/64",
+ default="2003::/48",
help="The cidr block to allocate tenant ipv6 subnets from"),
cfg.IntOpt('tenant_network_v6_mask_bits',
- default=96,
+ default=64,
help="The mask bits for tenant ipv6 subnets"),
cfg.BoolOpt('tenant_networks_reachable',
default=False,
@@ -622,6 +622,15 @@
help="A list of the enabled optional discoverable apis. "
"A single entry, all, indicates that all of these "
"features are expected to be enabled"),
+ cfg.BoolOpt('container_sync',
+ default=True,
+ help="Execute (old style) container-sync tests"),
+ cfg.BoolOpt('object_versioning',
+ default=True,
+ help="Execute object-versioning tests"),
+ cfg.BoolOpt('discoverability',
+ default=True,
+ help="Execute discoverability tests"),
]
database_group = cfg.OptGroup(name='database',
diff --git a/tempest/manager.py b/tempest/manager.py
index fb2842f..75aee96 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -51,17 +51,17 @@
self.client_attr_names = []
@classmethod
- def get_auth_provider_class(cls, auth_version):
- if auth_version == 'v2':
- return auth.KeystoneV2AuthProvider
- else:
+ def get_auth_provider_class(cls, credentials):
+ if isinstance(credentials, auth.KeystoneV3Credentials):
return auth.KeystoneV3AuthProvider
+ else:
+ return auth.KeystoneV2AuthProvider
def get_auth_provider(self, credentials):
if credentials is None:
raise exceptions.InvalidCredentials(
'Credentials must be specified')
- auth_provider_class = self.get_auth_provider_class(self.auth_version)
+ auth_provider_class = self.get_auth_provider_class(credentials)
return auth_provider_class(
client_type=getattr(self, 'client_type', None),
interface=getattr(self, 'interface', None),
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f7db79d..1897ec7 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,13 +16,10 @@
import logging
import os
-import re
import subprocess
-import time
from cinderclient import exceptions as cinder_exceptions
import glanceclient
-from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
@@ -38,7 +35,6 @@
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log
-from tempest.openstack.common import timeutils
from tempest.services.network import resources as net_resources
import tempest.test
@@ -92,6 +88,15 @@
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
cls.network_client = cls.manager.network_client
+ # Heat client
+ cls.orchestration_client = cls.manager.orchestration_client
+
+ @classmethod
+ def tearDownClass(cls):
+ # Isolated creds also manage network resources, which should
+ # be cleaned up at the end of the test case
+ cls.isolated_creds.clear_isolated_creds()
+ super(ScenarioTest, cls).tearDownClass()
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -107,6 +112,11 @@
'user')
@classmethod
+ def alt_credentials(cls):
+ return cls._get_credentials(cls.isolated_creds.get_alt_creds,
+ 'alt_user')
+
+ @classmethod
def admin_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'identity_admin')
@@ -181,11 +191,13 @@
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
- def create_keypair(self):
+ def create_keypair(self, client=None):
+ if not client:
+ client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
- resp, body = self.keypairs_client.create_keypair(name)
- self.addCleanup(self.keypairs_client.delete_keypair, name)
+ resp, body = client.create_keypair(name)
+ self.addCleanup(client.delete_keypair, name)
return body
def create_server(self, name=None, image=None, flavor=None,
@@ -257,14 +269,18 @@
_, volume = self.volumes_client.create_volume(
size=size, display_name=name, snapshot_id=snapshot_id,
imageRef=imageRef, volume_type=volume_type)
+
if wait_on_delete:
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
- self.addCleanup_with_wait(
- waiter_callable=self.volumes_client.wait_for_resource_deletion,
- thing_id=volume['id'], thing_id_param='id',
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[self.volumes_client.delete_volume, volume['id']])
+ self.addCleanup(self.delete_wrapper,
+ self.volumes_client.delete_volume, volume['id'])
+ else:
+ self.addCleanup_with_wait(
+ waiter_callable=self.volumes_client.wait_for_resource_deletion,
+ thing_id=volume['id'], thing_id_param='id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.volumes_client.delete_volume, volume['id']])
self.assertEqual(name, volume['display_name'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
@@ -398,6 +414,9 @@
LOG.debug("image:%s" % self.image)
def _log_console_output(self, servers=None):
+ if not CONF.compute_feature_enabled.console_output:
+ LOG.debug('Console output not supported, cannot log')
+ return
if not servers:
_, servers = self.servers_client.list_servers()
servers = servers['servers']
@@ -429,6 +448,24 @@
image_name, server['name'])
return snapshot_image
+ def nova_volume_attach(self):
+ # TODO(andreaf) The device should come from CONF.compute.volume_device_name
+ _, volume_attachment = self.servers_client.attach_volume(
+ self.server['id'], self.volume['id'], '/dev/vdb')
+ volume = volume_attachment['volumeAttachment']
+ self.assertEqual(self.volume['id'], volume['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+ # Refresh the volume after the attachment
+ _, self.volume = self.volumes_client.get_volume(volume['id'])
+
+ def nova_volume_detach(self):
+ self.servers_client.detach_volume(self.server['id'], self.volume['id'])
+ self.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+
+ _, volume = self.volumes_client.get_volume(self.volume['id'])
+ self.assertEqual('available', volume['status'])
+
# TODO(yfried): change this class name to NetworkScenarioTest once client
# migration is complete
@@ -464,11 +501,15 @@
cls.tenant_id = cls.manager.identity_client.tenant_id
cls.check_preconditions()
- def _create_network(self, tenant_id, namestart='network-smoke-'):
+ def _create_network(self, client=None, tenant_id=None,
+ namestart='network-smoke-'):
+ if not client:
+ client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
name = data_utils.rand_name(namestart)
- _, result = self.network_client.create_network(name=name,
- tenant_id=tenant_id)
- network = net_resources.DeletableNetwork(client=self.network_client,
+ _, result = client.create_network(name=name, tenant_id=tenant_id)
+ network = net_resources.DeletableNetwork(client=client,
**result['network'])
self.assertEqual(network.name, name)
self.addCleanup(self.delete_wrapper, network.delete)
@@ -498,11 +539,14 @@
return resource_list[resource_type]
return temp
- def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
+ def _create_subnet(self, network, client=None, namestart='subnet-smoke',
+ **kwargs):
"""
Create a subnet for the given network within the cidr block
configured for tenant networks.
"""
+ if not client:
+ client = self.network_client
def cidr_in_use(cidr, tenant_id):
"""
@@ -531,27 +575,29 @@
**kwargs
)
try:
- _, result = self.network_client.create_subnet(**subnet)
+ _, result = client.create_subnet(**subnet)
break
- except exc.NeutronClientException as e:
+ except exceptions.Conflict as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
- subnet = net_resources.DeletableSubnet(client=self.network_client,
+ subnet = net_resources.DeletableSubnet(client=client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
self.addCleanup(self.delete_wrapper, subnet.delete)
return subnet
- def _create_port(self, network, namestart='port-quotatest'):
+ def _create_port(self, network, client=None, namestart='port-quotatest'):
+ if not client:
+ client = self.network_client
name = data_utils.rand_name(namestart)
- _, result = self.network_client.create_port(
+ _, result = client.create_port(
name=name,
network_id=network.id,
tenant_id=network.tenant_id)
self.assertIsNotNone(result, 'Unable to allocate port')
- port = net_resources.DeletablePort(client=self.network_client,
+ port = net_resources.DeletablePort(client=client,
**result['port'])
self.addCleanup(self.delete_wrapper, port.delete)
return port
@@ -563,16 +609,23 @@
"Unable to determine which port to target.")
return ports[0]['id']
- def _create_floating_ip(self, thing, external_network_id, port_id=None):
+ def _get_network_by_name(self, network_name):
+ net = self._list_networks(name=network_name)
+ return net_common.AttributeDict(net[0])
+
+ def _create_floating_ip(self, thing, external_network_id, port_id=None,
+ client=None):
+ if not client:
+ client = self.network_client
if not port_id:
port_id = self._get_server_port_id(thing)
- _, result = self.network_client.create_floatingip(
+ _, result = client.create_floatingip(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing['tenant_id']
)
floating_ip = net_resources.DeletableFloatingIp(
- client=self.network_client,
+ client=client,
**result['floatingip'])
self.addCleanup(self.delete_wrapper, floating_ip.delete)
return floating_ip
@@ -702,10 +755,12 @@
CONF.compute.ping_timeout,
1)
- def _create_security_group(self, tenant_id, client=None,
+ def _create_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
if client is None:
client = self.network_client
+ if tenant_id is None:
+ tenant_id = client.rest_client.tenant_id
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
@@ -717,7 +772,7 @@
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
- def _create_empty_security_group(self, tenant_id, client=None,
+ def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
@@ -730,6 +785,8 @@
"""
if client is None:
client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
@@ -746,13 +803,15 @@
self.addCleanup(self.delete_wrapper, secgroup.delete)
return secgroup
- def _default_security_group(self, tenant_id, client=None):
+ def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
@@ -765,7 +824,7 @@
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
- def _create_security_group_rule(self, client=None, secgroup=None,
+ def _create_security_group_rule(self, secgroup=None, client=None,
tenant_id=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
@@ -773,8 +832,6 @@
default secgroup in tenant_id.
:param secgroup: type DeletableSecurityGroup.
- :param secgroup_id: search for secgroup by id
- default -- choose default secgroup for given tenant_id
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
@@ -788,8 +845,11 @@
"""
if client is None:
client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
if secgroup is None:
- secgroup = self._default_security_group(tenant_id)
+ secgroup = self._default_security_group(client=client,
+ tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id)
@@ -851,7 +911,7 @@
username=ssh_login,
private_key=private_key)
- def _get_router(self, tenant_id):
+ def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
@@ -860,41 +920,62 @@
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
+ if not client:
+ client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
- result = self.network_client.show_router(router_id)
+ result = client.show_router(router_id)
return net_resources.AttributeDict(**result['router'])
elif network_id:
- router = self._create_router(tenant_id)
+ router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
- def _create_router(self, tenant_id, namestart='router-smoke-'):
+ def _create_router(self, client=None, tenant_id=None,
+ namestart='router-smoke'):
+ if not client:
+ client = self.network_client
+ if not tenant_id:
+ tenant_id = client.rest_client.tenant_id
name = data_utils.rand_name(namestart)
- _, result = self.network_client.create_router(name=name,
- admin_state_up=True,
- tenant_id=tenant_id, )
- router = net_resources.DeletableRouter(client=self.network_client,
+ _, result = client.create_router(name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ router = net_resources.DeletableRouter(client=client,
**result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router
- def _create_networks(self, tenant_id=None):
+ def create_networks(self, client=None, tenant_id=None):
"""Create a network with a subnet connected to a router.
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
:returns: network, subnet, router
"""
- if tenant_id is None:
- tenant_id = self.tenant_id
- network = self._create_network(tenant_id)
- router = self._get_router(tenant_id)
- subnet = self._create_subnet(network)
- subnet.add_to_router(router.id)
+ if CONF.baremetal.driver_enabled:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ router = None
+ subnet = None
+ else:
+ network = self._create_network(client=client, tenant_id=tenant_id)
+ router = self._get_router(client=client, tenant_id=tenant_id)
+ subnet = self._create_subnet(network=network, client=client)
+ subnet.add_to_router(router.id)
return network, subnet, router
@@ -1559,7 +1640,7 @@
timeout=CONF.baremetal.unprovision_timeout)
-class EncryptionScenarioTest(OfficialClientTest):
+class EncryptionScenarioTest(ScenarioTest):
"""
Base class for encryption scenario tests
"""
@@ -1567,11 +1648,7 @@
@classmethod
def setUpClass(cls):
super(EncryptionScenarioTest, cls).setUpClass()
-
- # use admin credentials to create encrypted volume types
- admin_creds = cls.admin_credentials()
- manager = clients.OfficialClientManager(credentials=admin_creds)
- cls.admin_volume_client = manager.volume_client
+ cls.admin_volume_types_client = cls.admin_manager.volume_types_client
def _wait_for_volume_status(self, status):
self.status_timeout(
@@ -1579,53 +1656,35 @@
def nova_boot(self):
self.keypair = self.create_keypair()
- create_kwargs = {'key_name': self.keypair.name}
- self.server = self.create_server(self.compute_client,
- image=self.image,
+ create_kwargs = {'key_name': self.keypair['name']}
+ self.server = self.create_server(image=self.image,
create_kwargs=create_kwargs)
def create_volume_type(self, client=None, name=None):
if not client:
- client = self.admin_volume_client
+ client = self.admin_volume_types_client
if not name:
name = 'generic'
randomized_name = data_utils.rand_name('scenario-type-' + name + '-')
LOG.debug("Creating a volume type: %s", randomized_name)
- volume_type = client.volume_types.create(randomized_name)
- self.addCleanup(client.volume_types.delete, volume_type.id)
- return volume_type
+ _, body = client.create_volume_type(
+ randomized_name)
+ self.assertIn('id', body)
+ self.addCleanup(client.delete_volume_type, body['id'])
+ return body
def create_encryption_type(self, client=None, type_id=None, provider=None,
key_size=None, cipher=None,
control_location=None):
if not client:
- client = self.admin_volume_client
+ client = self.admin_volume_types_client
if not type_id:
volume_type = self.create_volume_type()
- type_id = volume_type.id
+ type_id = volume_type['id']
LOG.debug("Creating an encryption type for volume type: %s", type_id)
- client.volume_encryption_types.create(type_id,
- {'provider': provider,
- 'key_size': key_size,
- 'cipher': cipher,
- 'control_location':
- control_location})
-
- def nova_volume_attach(self):
- attach_volume_client = self.compute_client.volumes.create_server_volume
- volume = attach_volume_client(self.server.id,
- self.volume.id,
- '/dev/vdb')
- self.assertEqual(self.volume.id, volume.id)
- self._wait_for_volume_status('in-use')
-
- def nova_volume_detach(self):
- detach_volume_client = self.compute_client.volumes.delete_server_volume
- detach_volume_client(self.server.id, self.volume.id)
- self._wait_for_volume_status('available')
-
- volume = self.volume_client.volumes.get(self.volume.id)
- self.assertEqual('available', volume.status)
+ client.create_encryption_type(
+ type_id, provider=provider, key_size=key_size, cipher=cipher,
+ control_location=control_location)
class NetworkScenarioTest(OfficialClientTest):
@@ -1763,6 +1822,10 @@
"Unable to determine which port to target.")
return ports[0]['id']
+ def _get_network_by_name(self, network_name):
+ net = self._list_networks(name=network_name)
+ return net_common.AttributeDict(net[0])
+
def _create_floating_ip(self, thing, external_network_id, port_id=None):
if not port_id:
port_id = self._get_server_port_id(thing)
@@ -2160,21 +2223,35 @@
self.addCleanup(self.delete_wrapper, router)
return router
- def _create_networks(self, tenant_id=None):
+ def create_networks(self, tenant_id=None):
"""Create a network with a subnet connected to a router.
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
:returns: network, subnet, router
"""
- if tenant_id is None:
- tenant_id = self.tenant_id
- network = self._create_network(tenant_id)
- router = self._get_router(tenant_id)
- subnet = self._create_subnet(network)
- subnet.add_to_router(router.id)
+ if CONF.baremetal.driver_enabled:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ router = None
+ subnet = None
+ else:
+ if tenant_id is None:
+ tenant_id = self.tenant_id
+ network = self._create_network(tenant_id)
+ router = self._get_router(tenant_id)
+ subnet = self._create_subnet(network)
+ subnet.add_to_router(router.id)
return network, subnet, router
-class OrchestrationScenarioTest(OfficialClientTest):
+class OrchestrationScenarioTest(ScenarioTest):
"""
Base class for orchestration scenario tests
"""
@@ -2204,15 +2281,15 @@
@classmethod
def _get_default_network(cls):
- networks = cls.network_client.list_networks()
- for net in networks['networks']:
- if net['name'] == CONF.compute.fixed_network_name:
+ _, networks = cls.networks_client.list_networks()
+ for net in networks:
+ if net['label'] == CONF.compute.fixed_network_name:
return net
@staticmethod
def _stack_output(stack, output_key):
"""Return a stack output value for a given key."""
- return next((o['output_value'] for o in stack.outputs
+ return next((o['output_value'] for o in stack['outputs']
if o['output_key'] == output_key), None)
def _ping_ip_address(self, ip_address, should_succeed=True):
@@ -2228,82 +2305,6 @@
return tempest.test.call_until_true(
ping, CONF.orchestration.build_timeout, 1)
- def _wait_for_resource_status(self, stack_identifier, resource_name,
- status, failure_pattern='^.*_FAILED$'):
- """Waits for a Resource to reach a given status."""
- fail_regexp = re.compile(failure_pattern)
- build_timeout = CONF.orchestration.build_timeout
- build_interval = CONF.orchestration.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- try:
- res = self.client.resources.get(
- stack_identifier, resource_name)
- except heat_exceptions.HTTPNotFound:
- # ignore this, as the resource may not have
- # been created yet
- pass
- else:
- if res.resource_status == status:
- return
- if fail_regexp.search(res.resource_status):
- raise exceptions.StackResourceBuildErrorException(
- resource_name=res.resource_name,
- stack_identifier=stack_identifier,
- resource_status=res.resource_status,
- resource_status_reason=res.resource_status_reason)
- time.sleep(build_interval)
-
- message = ('Resource %s failed to reach %s status within '
- 'the required time (%s s).' %
- (res.resource_name, status, build_timeout))
- raise exceptions.TimeoutException(message)
-
- def _wait_for_stack_status(self, stack_identifier, status,
- failure_pattern='^.*_FAILED$'):
- """
- Waits for a Stack to reach a given status.
-
- Note this compares the full $action_$status, e.g
- CREATE_COMPLETE, not just COMPLETE which is exposed
- via the status property of Stack in heatclient
- """
- fail_regexp = re.compile(failure_pattern)
- build_timeout = CONF.orchestration.build_timeout
- build_interval = CONF.orchestration.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- try:
- stack = self.client.stacks.get(stack_identifier)
- except heat_exceptions.HTTPNotFound:
- # ignore this, as the stackource may not have
- # been created yet
- pass
- else:
- if stack.stack_status == status:
- return
- if fail_regexp.search(stack.stack_status):
- raise exceptions.StackBuildErrorException(
- stack_identifier=stack_identifier,
- stack_status=stack.stack_status,
- stack_status_reason=stack.stack_status_reason)
- time.sleep(build_interval)
-
- message = ('Stack %s failed to reach %s status within '
- 'the required time (%s s).' %
- (stack.stack_name, status, build_timeout))
- raise exceptions.TimeoutException(message)
-
- def _stack_delete(self, stack_identifier):
- try:
- self.client.stacks.delete(stack_identifier)
- except heat_exceptions.HTTPNotFound:
- pass
-
class SwiftScenarioTest(ScenarioTest):
"""
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 36e6126..4e85429 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -38,7 +38,7 @@
self.keypair_name = CONF.orchestration.keypair_name
else:
self.keypair = self.create_keypair()
- self.keypair_name = self.keypair.id
+ self.keypair_name = self.keypair['name']
def launch_stack(self):
net = self._get_default_network()
@@ -52,32 +52,36 @@
# create the stack
self.template = self._load_template(__file__, self.template_name)
- self.client.stacks.create(
- stack_name=self.stack_name,
+ _, stack = self.client.create_stack(
+ name=self.stack_name,
template=self.template,
parameters=self.parameters)
+ stack = stack['stack']
- self.stack = self.client.stacks.get(self.stack_name)
- self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
- self.addCleanup(self._stack_delete, self.stack_identifier)
+ _, self.stack = self.client.get_stack(stack['id'])
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack['id'])
+ self.addCleanup(self.delete_wrapper,
+ self.orchestration_client.delete_stack,
+ self.stack_identifier)
def check_stack(self):
sid = self.stack_identifier
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'WaitHandle', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeKeys', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'CfnUser', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeServer', 'CREATE_COMPLETE')
- server_resource = self.client.resources.get(sid, 'SmokeServer')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
- server_ip = server.networks[CONF.compute.network_for_ssh][0]
+ _, server_resource = self.client.get_resource(sid, 'SmokeServer')
+ server_id = server_resource['physical_resource_id']
+ _, server = self.servers_client.get_server(server_id)
+ server_ip =\
+ server['addresses'][CONF.compute.network_for_ssh][0]['addr']
if not self._ping_ip_address(server_ip):
self._log_console_output(servers=[server])
@@ -85,7 +89,7 @@
"Timed out waiting for %s to become reachable" % server_ip)
try:
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
except (exceptions.StackResourceBuildErrorException,
exceptions.TimeoutException) as e:
@@ -96,9 +100,9 @@
# logs to be compared
self._log_console_output(servers=[server])
- self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+ self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
- stack = self.client.stacks.get(sid)
+ _, stack = self.client.get_stack(sid)
# This is an assert of great significance, as it means the following
# has happened:
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index 9ad6bc4..efbf4ce 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -133,18 +133,23 @@
# the same size as our flavor definition.
eph_size = self.get_flavor_ephemeral_size()
self.assertIsNotNone(eph_size)
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ if eph_size > 0:
+ preserve_ephemeral = True
- # Create the test file
- self.create_remote_file(vm_client, test_filename)
+ self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ # Create the test file
+ self.create_remote_file(vm_client, test_filename)
+ else:
+ preserve_ephemeral = False
- # Rebuild and preserve the ephemeral partition
- self.rebuild_instance(True)
+ # Rebuild and preserve the ephemeral partition if it exists
+ self.rebuild_instance(preserve_ephemeral)
self.verify_connectivity()
# Check that we maintained our data
- vm_client = self.get_remote_client(self.instance)
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
- vm_client.exec_command('ls ' + test_filename)
+ if eph_size > 0:
+ vm_client = self.get_remote_client(self.instance)
+ self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ vm_client.exec_command('ls ' + test_filename)
self.terminate_instance()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 4fcc70a..72cc8b0 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -69,6 +69,7 @@
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
+ @test.skip_because(bug="1345955")
@test.services('dashboard')
def test_basic_scenario(self):
self.check_login_page()
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 366cd93..ac2ef8a 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -37,12 +37,12 @@
def create_encrypted_volume(self, encryption_provider):
volume_type = self.create_volume_type(name='luks')
- self.create_encryption_type(type_id=volume_type.id,
+ self.create_encryption_type(type_id=volume_type['id'],
provider=encryption_provider,
key_size=512,
cipher='aes-xts-plain64',
control_location='front-end')
- self.volume = self.create_volume(volume_type=volume_type.name)
+ self.volume = self.create_volume(volume_type=volume_type['name'])
def attach_detach_volume(self):
self.nova_volume_attach()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 15cf13b..a7ea70f 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class TestLargeOpsScenario(manager.NetworkScenarioTest):
+class TestLargeOpsScenario(manager.ScenarioTest):
"""
Test large operations.
@@ -44,29 +44,34 @@
def _wait_for_server_status(self, status):
for server in self.servers:
- self.status_timeout(
- self.compute_client.servers, server.id, status)
+ self.servers_client.wait_for_server_status(server['id'], status)
def nova_boot(self):
name = data_utils.rand_name('scenario-server-')
- client = self.compute_client
flavor_id = CONF.compute.flavor_ref
- secgroup = self._create_security_group_nova()
- self.servers = client.servers.create(
- name=name, image=self.image,
- flavor=flavor_id,
+ secgroup = self._create_security_group()
+ self.servers_client.create_server(
+ name,
+ self.image,
+ flavor_id,
min_count=CONF.scenario.large_ops_number,
- security_groups=[secgroup.name])
+ security_groups=[secgroup])
# needed because of bug 1199788
- self.servers = [x for x in client.servers.list() if name in x.name]
+ params = {'name': name}
+ _, server_list = self.servers_client.list_servers(params)
+ self.servers = server_list['servers']
for server in self.servers:
# after deleting all servers - wait for all servers to clear
# before cleanup continues
- self.addCleanup(self.delete_timeout,
- self.compute_client.servers,
- server.id)
+ self.addCleanup(self.servers_client.wait_for_server_termination,
+ server['id'])
for server in self.servers:
- self.addCleanup_with_wait(self.compute_client.servers, server.id)
+ self.addCleanup_with_wait(
+ waiter_callable=(self.servers_client.
+ wait_for_server_termination),
+ thing_id=server['id'], thing_id_param='server_id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.servers_client.delete_server, server['id']])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
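The cleanup ordering above relies on a standard unittest property: addCleanup callbacks run last-in, first-out, so the termination waits registered first execute only after the deletions registered later. A toy, self-contained illustration of that ordering (plain unittest, not Tempest code):

    import unittest

    events = []

    class CleanupOrder(unittest.TestCase):
        def test_order(self):
            # Registered first, runs last (the "wait for termination" above).
            self.addCleanup(events.append, 'wait_for_server_termination')
            # Registered second, runs first (the "delete server" above).
            self.addCleanup(events.append, 'delete_server')

    if __name__ == '__main__':
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(CleanupOrder)
        unittest.TextTestRunner().run(suite)
        # events is now ['delete_server', 'wait_for_server_termination']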
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 8191984..35e50e8 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -73,6 +73,35 @@
self.server_ips = {}
self.server_fixed_ips = {}
self._create_security_group()
+ self._set_net_and_subnet()
+
+ def _set_net_and_subnet(self):
+ """
+ Query and set appropriate network and subnet attributes to be used
+ for the test. Existing tenant networks are used if they are found.
+ The configured private network and its associated subnet are used as a
+ fallback in the absence of tenant networking.
+ """
+ try:
+ tenant_net = self._list_networks(tenant_id=self.tenant_id)[0]
+ except IndexError:
+ tenant_net = None
+
+ if tenant_net:
+ tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
+ self.subnet = net_common.DeletableSubnet(
+ client=self.network_client,
+ **tenant_subnet)
+ self.network = tenant_net
+ else:
+ self.network = self._get_network_by_name(
+ config.compute.fixed_network_name)
+ # TODO(adam_g): We are assuming that the first subnet associated
+ # with the fixed network is the one we want. In the future, we
+ # should instead pull a subnet id from config, which is set by
+ # devstack/admin/etc.
+ subnet = self._list_subnets(network_id=self.network['id'])[0]
+ self.subnet = net_common.AttributeDict(subnet)
def _create_security_group(self):
self.security_group = self._create_security_group_neutron(
@@ -96,10 +125,9 @@
def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
security_groups = [self.security_group.name]
- net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
'nics': [
- {'net-id': net['id']},
+ {'net-id': self.network['id']},
],
'key_name': keypair.name,
'security_groups': security_groups,
@@ -107,6 +135,7 @@
server = self.create_server(name=name,
create_kwargs=create_kwargs)
self.servers_keypairs[server.id] = keypair
+ net_name = self.network['name']
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
@@ -115,8 +144,8 @@
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
- self.server_ips[server.id] = server.networks[net['name']][0]
- self.server_fixed_ips[server.id] = server.networks[net['name']][0]
+ self.server_ips[server.id] = server.networks[net_name][0]
+ self.server_fixed_ips[server.id] = server.networks[net_name][0]
self.assertTrue(self.servers_keypairs)
return server
@@ -132,7 +161,6 @@
1. SSH to the instance
2. Start two http backends listening on ports 80 and 88 respectively
"""
-
for server_id, ip in self.server_ips.iteritems():
private_key = self.servers_keypairs[server_id].private_key
server_name = self.compute_client.servers.get(server_id).name
@@ -196,10 +224,6 @@
def _create_pool(self):
"""Create a pool with ROUND_ROBIN algorithm."""
- # get tenant subnet and verify there's only one
- subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
- self.subnet = net_common.DeletableSubnet(client=self.network_client,
- **subnet)
self.pool = super(TestLoadBalancerBasic, self)._create_pool(
lb_method='ROUND_ROBIN',
protocol='HTTP',
@@ -288,7 +312,6 @@
self.assertEqual(expected,
set(resp))
- @test.attr(type='smoke')
@test.services('compute', 'network')
def test_load_balancer_basic(self):
self._create_server('server1')
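The new _set_net_and_subnet() boils down to: prefer an existing tenant network, otherwise fall back to the configured fixed network and its first subnet. A compact sketch of that decision using only the helper names visible above; the helper itself and the surrounding class wiring are assumed.

    def _pick_net_and_subnet(self):
        # Hypothetical helper mirroring the selection logic above.
        tenant_nets = self._list_networks(tenant_id=self.tenant_id)
        if tenant_nets:
            tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
            return tenant_nets[0], net_common.DeletableSubnet(
                client=self.network_client, **tenant_subnet)
        # Fall back to the configured fixed network and its first subnet.
        net = self._get_network_by_name(config.compute.fixed_network_name)
        subnet = self._list_subnets(network_id=net['id'])[0]
        return net, net_common.AttributeDict(subnet)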
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 431de9a..c145551 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
+class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
"""
This test case checks VM connectivity after some advanced
@@ -40,9 +40,8 @@
"""
@classmethod
- def setUpClass(cls):
- super(TestNetworkAdvancedServerOps, cls).setUpClass()
- cls.check_preconditions()
+ def check_preconditions(cls):
+ super(TestNetworkAdvancedServerOps, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
@@ -50,35 +49,39 @@
cls.enabled = False
raise cls.skipException(msg)
- def setUp(self):
- super(TestNetworkAdvancedServerOps, self).setUp()
- key_name = data_utils.rand_name('keypair-smoke-')
- self.keypair = self.create_keypair(name=key_name)
- security_group =\
- self._create_security_group_neutron(tenant_id=self.tenant_id)
- network = self._create_network(self.tenant_id)
- router = self._get_router(self.tenant_id)
- subnet = self._create_subnet(network)
- subnet.add_to_router(router.id)
+ @classmethod
+ def setUpClass(cls):
+ # Create no network resources for these tests.
+ cls.set_network_resources()
+ super(TestNetworkAdvancedServerOps, cls).setUpClass()
+
+ def _setup_network_and_servers(self):
+ self.keypair = self.create_keypair()
+ security_group = self._create_security_group()
+ network, subnet, router = self.create_networks()
public_network_id = CONF.network.public_network_id
create_kwargs = {
'nics': [
{'net-id': network.id},
],
- 'key_name': self.keypair.name,
- 'security_groups': [security_group.name],
+ 'key_name': self.keypair['name'],
+ 'security_groups': [security_group],
}
- server_name = data_utils.rand_name('server-smoke-%d-')
+ server_name = data_utils.rand_name('server-smoke')
self.server = self.create_server(name=server_name,
create_kwargs=create_kwargs)
self.floating_ip = self._create_floating_ip(self.server,
public_network_id)
+ # Verify that we can indeed connect to the server before we mess with
+ # its state
+ self._wait_server_status_and_check_network_connectivity()
def _check_network_connectivity(self, should_connect=True):
username = CONF.compute.image_ssh_user
- private_key = self.keypair.private_key
+ private_key = self.keypair['private_key']
self._check_tenant_network_connectivity(
- self.server, username, private_key, should_connect=should_connect,
+ self.server, username, private_key,
+ should_connect=should_connect,
servers_for_debug=[self.server])
floating_ip = self.floating_ip.floating_ip_address
self._check_public_network_connectivity(floating_ip, username,
@@ -86,50 +89,54 @@
servers=[self.server])
def _wait_server_status_and_check_network_connectivity(self):
- self.status_timeout(self.compute_client.servers, self.server.id,
- 'ACTIVE')
+ self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
self._check_network_connectivity()
@test.services('compute', 'network')
def test_server_connectivity_stop_start(self):
- self.server.stop()
- self.status_timeout(self.compute_client.servers, self.server.id,
- 'SHUTOFF')
+ self._setup_network_and_servers()
+ self.servers_client.stop(self.server['id'])
+ self.servers_client.wait_for_server_status(self.server['id'],
+ 'SHUTOFF')
self._check_network_connectivity(should_connect=False)
- self.server.start()
+ self.servers_client.start(self.server['id'])
self._wait_server_status_and_check_network_connectivity()
@test.services('compute', 'network')
def test_server_connectivity_reboot(self):
- self.server.reboot()
+ self._setup_network_and_servers()
+ self.servers_client.reboot(self.server['id'], reboot_type='SOFT')
self._wait_server_status_and_check_network_connectivity()
@test.services('compute', 'network')
def test_server_connectivity_rebuild(self):
+ self._setup_network_and_servers()
image_ref_alt = CONF.compute.image_ref_alt
- self.server.rebuild(image_ref_alt)
+ self.servers_client.rebuild(self.server['id'],
+ image_ref=image_ref_alt)
self._wait_server_status_and_check_network_connectivity()
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.services('compute', 'network')
def test_server_connectivity_pause_unpause(self):
- self.server.pause()
- self.status_timeout(self.compute_client.servers, self.server.id,
- 'PAUSED')
+ self._setup_network_and_servers()
+ self.servers_client.pause_server(self.server['id'])
+ self.servers_client.wait_for_server_status(self.server['id'], 'PAUSED')
self._check_network_connectivity(should_connect=False)
- self.server.unpause()
+ self.servers_client.unpause_server(self.server['id'])
self._wait_server_status_and_check_network_connectivity()
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.services('compute', 'network')
def test_server_connectivity_suspend_resume(self):
- self.server.suspend()
- self.status_timeout(self.compute_client.servers, self.server.id,
- 'SUSPENDED')
+ self._setup_network_and_servers()
+ self.servers_client.suspend_server(self.server['id'])
+ self.servers_client.wait_for_server_status(self.server['id'],
+ 'SUSPENDED')
self._check_network_connectivity(should_connect=False)
- self.server.resume()
+ self.servers_client.resume_server(self.server['id'])
self._wait_server_status_and_check_network_connectivity()
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -140,9 +147,9 @@
if resize_flavor == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise self.skipException(msg)
- resize_flavor = CONF.compute.flavor_ref_alt
- self.server.resize(resize_flavor)
- self.status_timeout(self.compute_client.servers, self.server.id,
- 'VERIFY_RESIZE')
- self.server.confirm_resize()
+ self._setup_network_and_servers()
+ self.servers_client.resize(self.server['id'], flavor_ref=resize_flavor)
+ self.servers_client.wait_for_server_status(self.server['id'],
+ 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize(self.server['id'])
self._wait_server_status_and_check_network_connectivity()
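All of these connectivity tests now follow the same shape with the JSON servers client; a condensed sketch of the stop/start variant, using only calls that appear above (the test fixture is assumed):

    # Hypothetical excerpt, not part of the patch.
    self._setup_network_and_servers()
    self.servers_client.stop(self.server['id'])
    self.servers_client.wait_for_server_status(self.server['id'], 'SHUTOFF')
    self._check_network_connectivity(should_connect=False)
    self.servers_client.start(self.server['id'])
    self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
    self._check_network_connectivity()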
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 81cfd91..e8dba6a 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -99,12 +99,15 @@
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
- self.security_group = \
- self._create_security_group(tenant_id=self.tenant_id)
- self.network, self.subnet, self.router = self._create_networks()
- self.check_networks()
self.keypairs = {}
self.servers = []
+
+ def _setup_network_and_servers(self):
+ self.security_group = \
+ self._create_security_group(tenant_id=self.tenant_id)
+ self.network, self.subnet, self.router = self.create_networks()
+ self.check_networks()
+
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network)
self._check_tenant_network_connectivity()
@@ -123,19 +126,21 @@
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)
- seen_subnets = self._list_subnets()
- seen_net_ids = [n['network_id'] for n in seen_subnets]
- seen_subnet_ids = [n['id'] for n in seen_subnets]
- self.assertIn(self.network.id, seen_net_ids)
- self.assertIn(self.subnet.id, seen_subnet_ids)
+ if self.subnet:
+ seen_subnets = self._list_subnets()
+ seen_net_ids = [n['network_id'] for n in seen_subnets]
+ seen_subnet_ids = [n['id'] for n in seen_subnets]
+ self.assertIn(self.network.id, seen_net_ids)
+ self.assertIn(self.subnet.id, seen_subnet_ids)
- seen_routers = self._list_routers()
- seen_router_ids = [n['id'] for n in seen_routers]
- seen_router_names = [n['name'] for n in seen_routers]
- self.assertIn(self.router.name,
- seen_router_names)
- self.assertIn(self.router.id,
- seen_router_ids)
+ if self.router:
+ seen_routers = self._list_routers()
+ seen_router_ids = [n['id'] for n in seen_routers]
+ seen_router_names = [n['name'] for n in seen_routers]
+ self.assertIn(self.router.name,
+ seen_router_names)
+ self.assertIn(self.router.id,
+ seen_router_ids)
def _create_server(self, name, network):
keypair = self.create_keypair()
@@ -198,7 +203,7 @@
floating_ip, server)
def _create_new_network(self):
- self.new_net = self._create_network(self.tenant_id)
+ self.new_net = self._create_network(tenant_id=self.tenant_id)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
@@ -347,6 +352,7 @@
"""
+ self._setup_network_and_servers()
self._check_public_network_connectivity(should_connect=True)
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
@@ -372,7 +378,7 @@
4. check VM can ping new network dhcp port
"""
-
+ self._setup_network_and_servers()
self._check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index ecb802f..520c232 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
+class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
"""
This test suite assumes that Nova has been configured to
@@ -99,7 +99,7 @@
"""
def __init__(self, credentials):
- self.manager = clients.OfficialClientManager(credentials)
+ self.manager = clients.Manager(credentials)
# Credentials from manager are filled with both names and IDs
self.creds = self.manager.credentials
self.network = None
@@ -113,13 +113,18 @@
self.subnet = subnet
self.router = router
- def _get_tenant_credentials(self):
- # FIXME(andreaf) Unused method
- return self.creds
-
@classmethod
def check_preconditions(cls):
+ if CONF.baremetal.driver_enabled:
+ msg = ('Not currently supported by baremetal.')
+ cls.enabled = False
+ raise cls.skipException(msg)
super(TestSecurityGroupsBasicOps, cls).check_preconditions()
+ # need alt_creds here to check preconditions
+ cls.alt_creds = cls.alt_credentials()
+ cls.alt_manager = clients.Manager(cls.alt_creds)
+ # Credentials from the manager are filled with both IDs and Names
+ cls.alt_creds = cls.alt_manager.credentials
if (cls.alt_creds is None) or \
(cls.tenant_id is cls.alt_creds.tenant_id):
msg = 'No alt_tenant defined'
@@ -137,11 +142,6 @@
# Create no network resources for these tests.
cls.set_network_resources()
super(TestSecurityGroupsBasicOps, cls).setUpClass()
- cls.alt_creds = cls.alt_credentials()
- cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
- # Credentials from the manager are filled with both IDs and Names
- cls.alt_creds = cls.alt_manager.credentials
- cls.check_preconditions()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
cls.floating_ips = {}
@@ -162,21 +162,22 @@
self._verify_network_details(self.primary_tenant)
self._verify_mac_addr(self.primary_tenant)
- def _create_tenant_keypairs(self, tenant_id):
- keypair = self.create_keypair(
- name=data_utils.rand_name('keypair-smoke-'))
- self.tenants[tenant_id].keypair = keypair
+ def _create_tenant_keypairs(self, tenant):
+ keypair = self.create_keypair(tenant.manager.keypairs_client)
+ tenant.keypair = keypair
def _create_tenant_security_groups(self, tenant):
access_sg = self._create_empty_security_group(
namestart='secgroup_access-',
- tenant_id=tenant.creds.tenant_id
+ tenant_id=tenant.creds.tenant_id,
+ client=tenant.manager.network_client
)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
- tenant_id=tenant.creds.tenant_id
+ tenant_id=tenant.creds.tenant_id,
+ client=tenant.manager.network_client
)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
@@ -185,7 +186,9 @@
port_range_max=22,
direction='ingress',
)
- self._create_security_group_rule(secgroup=access_sg, **ssh_rule)
+ self._create_security_group_rule(secgroup=access_sg,
+ client=tenant.manager.network_client,
+ **ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
@@ -212,7 +215,7 @@
myport = (tenant.router.id, tenant.subnet.id)
router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
- in self.network_client.list_ports()['ports']
+ in self._list_ports()
if self._is_router_port(i)]
self.assertIn(myport, router_ports)
@@ -229,17 +232,16 @@
"""
self._set_compute_context(tenant)
if security_groups is None:
- security_groups = [tenant.security_groups['default'].name]
+ security_groups = [tenant.security_groups['default']]
create_kwargs = {
'nics': [
{'net-id': tenant.network.id},
],
- 'key_name': tenant.keypair.name,
+ 'key_name': tenant.keypair['name'],
'security_groups': security_groups,
'tenant_id': tenant.creds.tenant_id
}
- server = self.create_server(name=name, create_kwargs=create_kwargs)
- return server
+ return self.create_server(name=name, create_kwargs=create_kwargs)
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
@@ -257,27 +259,30 @@
in order to access tenant internal network
workaround ip namespace
"""
- secgroups = [sg.name for sg in tenant.security_groups.values()]
+ secgroups = tenant.security_groups.values()
name = 'server-{tenant}-access_point-'.format(
tenant=tenant.creds.tenant_name)
name = data_utils.rand_name(name)
server = self._create_server(name, tenant,
security_groups=secgroups)
tenant.access_point = server
- self._assign_floating_ips(server)
+ self._assign_floating_ips(tenant, server)
- def _assign_floating_ips(self, server):
+ def _assign_floating_ips(self, tenant, server):
public_network_id = CONF.network.public_network_id
- floating_ip = self._create_floating_ip(server, public_network_id)
- self.floating_ips.setdefault(server, floating_ip)
+ floating_ip = self._create_floating_ip(
+ server, public_network_id,
+ client=tenant.manager.network_client)
+ self.floating_ips.setdefault(server['id'], floating_ip)
def _create_tenant_network(self, tenant):
- network, subnet, router = self._create_networks(tenant.creds.tenant_id)
+ network, subnet, router = self.create_networks(
+ client=tenant.manager.network_client)
tenant.set_network(network, subnet, router)
def _set_compute_context(self, tenant):
- self.compute_client = tenant.manager.compute_client
- return self.compute_client
+ self.servers_client = tenant.manager.servers_client
+ return self.servers_client
def _deploy_tenant(self, tenant_or_id):
"""
@@ -290,12 +295,10 @@
"""
if not isinstance(tenant_or_id, self.TenantProperties):
tenant = self.tenants[tenant_or_id]
- tenant_id = tenant_or_id
else:
tenant = tenant_or_id
- tenant_id = tenant.creds.tenant_id
self._set_compute_context(tenant)
- self._create_tenant_keypairs(tenant_id)
+ self._create_tenant_keypairs(tenant)
self._create_tenant_network(tenant)
self._create_tenant_security_groups(tenant)
self._set_access_point(tenant)
@@ -305,12 +308,12 @@
returns the ip (floating/internal) of a server
"""
if floating:
- server_ip = self.floating_ips[server].floating_ip_address
+ server_ip = self.floating_ips[server['id']].floating_ip_address
else:
server_ip = None
- network_name = self.tenants[server.tenant_id].network.name
- if network_name in server.networks:
- server_ip = server.networks[network_name][0]
+ network_name = self.tenants[server['tenant_id']].network.name
+ if network_name in server['addresses']:
+ server_ip = server['addresses'][network_name][0]['addr']
return server_ip
def _connect_to_access_point(self, tenant):
@@ -318,8 +321,8 @@
create ssh connection to tenant access point
"""
access_point_ssh = \
- self.floating_ips[tenant.access_point].floating_ip_address
- private_key = tenant.keypair.private_key
+ self.floating_ips[tenant.access_point['id']].floating_ip_address
+ private_key = tenant.keypair['private_key']
access_point_ssh = self._ssh_to_server(access_point_ssh,
private_key=private_key)
return access_point_ssh
@@ -383,6 +386,7 @@
)
self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
+ client=dest_tenant.manager.network_client,
**ruleset
)
access_point_ssh = self._connect_to_access_point(source_tenant)
@@ -396,6 +400,7 @@
# allow reverse traffic and check
self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
+ client=source_tenant.manager.network_client,
**ruleset
)
@@ -414,8 +419,7 @@
mac_addr = mac_addr.strip().lower()
# Get the fixed_ips and mac_address fields of all ports. Select
# only those two columns to reduce the size of the response.
- port_list = self.network_client.list_ports(
- fields=['fixed_ips', 'mac_address'])['ports']
+ port_list = self._list_ports(fields=['fixed_ips', 'mac_address'])
port_detail_list = [
(port['fixed_ips'][0]['subnet_id'],
port['fixed_ips'][0]['ip_address'],
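The recurring change in this file is that resources are now created through the owning tenant's own clients (tenant.manager.*) rather than the primary credentials. A short sketch of that pattern, reusing only the helper names and keyword arguments shown above; the wrapper function itself is hypothetical.

    def _make_tenant_access_sg(self, tenant):
        # Create a security group and an SSH rule with the tenant's own
        # network client instead of the primary one.
        sg = self._create_empty_security_group(
            namestart='secgroup_access-',
            tenant_id=tenant.creds.tenant_id,
            client=tenant.manager.network_client)
        ssh_rule = dict(protocol='tcp', port_range_min=22,
                        port_range_max=22, direction='ingress')
        self._create_security_group_rule(
            secgroup=sg, client=tenant.manager.network_client, **ssh_rule)
        return sg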
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index c32923a..fdda423 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -117,7 +117,7 @@
private_key=keypair['private_key'])
except Exception:
LOG.exception('ssh to server failed')
- self._log_console_output(self)
+ self._log_console_output(servers=[server])
raise
def _get_content(self, ssh_client):
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 07eee8a..032e1da 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -308,3 +308,33 @@
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
+
+ @base.handle_errors
+ def get_console(self, node_uuid):
+ """
+ Get connection information about the console.
+
+ :param node_uuid: Unique identifier of the node in UUID format.
+
+ """
+
+ resp, body = self._show_request('nodes/states/console', node_uuid)
+ self.expected_success(200, resp.status)
+ return resp, body
+
+ @base.handle_errors
+ def set_console_mode(self, node_uuid, enabled):
+ """
+ Start and stop the node console.
+
+ :param node_uuid: Unique identifier of the node in UUID format.
+ :param enabled: Boolean value; whether to enable or disable the
+ console.
+
+ """
+
+ enabled = {'enabled': enabled}
+ resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
+ enabled)
+ self.expected_success(202, resp.status)
+ return resp, body
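A hypothetical usage sketch for the two new console calls; only the method signatures above are taken from the patch, while the client construction and the exact keys in the returned body are assumptions.

    # `client` is assumed to be an instance of this baremetal client.
    client.set_console_mode(node_uuid, True)  # PUT nodes/<uuid>/states/console
    resp, body = client.get_console(node_uuid)
    # Keys assumed from the Ironic console-state response.
    if body.get('console_enabled'):
        print(body.get('console_info'))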
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index ac65f81..e76c1bd 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -309,6 +309,7 @@
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
+ self.expected_success(200, resp.status)
return resp, body['access']
@@ -326,6 +327,7 @@
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
+ self.expected_success(200, resp.status)
return resp, body['access']
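These one-line additions push response-status checking into the clients themselves: expected_success() raises when the returned status does not match the expected code, so callers no longer need to assert on resp.status. An illustrative sketch of the resulting shape of a client call (the client and resource names are made up):

    import json

    from tempest.common import rest_client

    class ThingClientJSON(rest_client.RestClient):
        def get_thing(self, thing_id):
            resp, body = self.get('things/%s' % thing_id)
            # Fails the call if the API did not return 200.
            self.expected_success(200, resp.status)
            return resp, json.loads(body)['thing']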
diff --git a/tempest/services/identity/v3/json/credentials_client.py b/tempest/services/identity/v3/json/credentials_client.py
index f795c7b..d424f4c 100644
--- a/tempest/services/identity/v3/json/credentials_client.py
+++ b/tempest/services/identity/v3/json/credentials_client.py
@@ -41,13 +41,14 @@
}
post_body = json.dumps({'credential': post_body})
resp, body = self.post('credentials', post_body)
+ self.expected_success(201, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return resp, body['credential']
def update_credential(self, credential_id, **kwargs):
"""Updates a credential."""
- resp, body = self.get_credential(credential_id)
+ _, body = self.get_credential(credential_id)
cred_type = kwargs.get('type', body['type'])
access_key = kwargs.get('access_key', body['blob']['access'])
secret_key = kwargs.get('secret_key', body['blob']['secret'])
@@ -63,6 +64,7 @@
}
post_body = json.dumps({'credential': post_body})
resp, body = self.patch('credentials/%s' % credential_id, post_body)
+ self.expected_success(200, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return resp, body['credential']
@@ -70,6 +72,7 @@
def get_credential(self, credential_id):
"""To GET Details of a credential."""
resp, body = self.get('credentials/%s' % credential_id)
+ self.expected_success(200, resp.status)
body = json.loads(body)
body['credential']['blob'] = json.loads(body['credential']['blob'])
return resp, body['credential']
@@ -77,10 +80,12 @@
def list_credentials(self):
"""Lists out all the available credentials."""
resp, body = self.get('credentials')
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['credentials']
def delete_credential(self, credential_id):
"""Deletes a credential."""
resp, body = self.delete('credentials/%s' % credential_id)
+ self.expected_success(204, resp.status)
return resp, body
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
index f7a894b..c3fedb2 100644
--- a/tempest/services/identity/v3/json/endpoints_client.py
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -32,6 +32,7 @@
def list_endpoints(self):
"""GET endpoints."""
resp, body = self.get('endpoints')
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['endpoints']
@@ -56,6 +57,7 @@
}
post_body = json.dumps({'endpoint': post_body})
resp, body = self.post('endpoints', post_body)
+ self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['endpoint']
@@ -82,10 +84,12 @@
post_body['enabled'] = enabled
post_body = json.dumps({'endpoint': post_body})
resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['endpoint']
def delete_endpoint(self, endpoint_id):
"""Delete endpoint."""
resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+ self.expected_success(204, resp_header.status)
return resp_header, resp_body
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 0522f37..df424ca 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -588,6 +588,7 @@
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
+ self.expected_success(201, resp.status)
return resp, body
def request(self, method, url, extra_headers=False, headers=None,
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policy_client.py
index 3c90fa1..e093260 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policy_client.py
@@ -37,12 +37,14 @@
}
post_body = json.dumps({'policy': post_body})
resp, body = self.post('policies', post_body)
+ self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['policy']
def list_policies(self):
"""Lists the policies."""
resp, body = self.get('policies')
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['policies']
@@ -50,12 +52,12 @@
"""Lists out the given policy."""
url = 'policies/%s' % policy_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['policy']
def update_policy(self, policy_id, **kwargs):
"""Updates a policy."""
- resp, body = self.get_policy(policy_id)
type = kwargs.get('type')
post_body = {
'type': type
@@ -63,10 +65,13 @@
post_body = json.dumps({'policy': post_body})
url = 'policies/%s' % policy_id
resp, body = self.patch(url, post_body)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['policy']
def delete_policy(self, policy_id):
"""Deletes the policy."""
url = "policies/%s" % policy_id
- return self.delete(url)
+ resp, body = self.delete(url)
+ self.expected_success(204, resp.status)
+ return resp, body
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
index c078765..becea6b 100644
--- a/tempest/services/identity/v3/json/region_client.py
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -43,6 +43,7 @@
'regions/%s' % kwargs.get('unique_region_id'), req_body)
else:
resp, body = self.post('regions', req_body)
+ self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['region']
@@ -55,6 +56,7 @@
post_body['parent_region_id'] = kwargs.get('parent_region_id')
post_body = json.dumps({'region': post_body})
resp, body = self.patch('regions/%s' % region_id, post_body)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['region']
@@ -62,6 +64,7 @@
"""Get region."""
url = 'regions/%s' % region_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['region']
@@ -71,10 +74,12 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['regions']
def delete_region(self, region_id):
"""Delete region."""
resp, body = self.delete('regions/%s' % region_id)
+ self.expected_success(204, resp.status)
return resp, body
diff --git a/tempest/services/identity/v3/json/service_client.py b/tempest/services/identity/v3/json/service_client.py
index 82e8aad..8e89957 100644
--- a/tempest/services/identity/v3/json/service_client.py
+++ b/tempest/services/identity/v3/json/service_client.py
@@ -57,10 +57,10 @@
def create_service(self, serv_type, name=None, description=None,
enabled=True):
body_dict = {
- "name": name,
+ 'name': name,
'type': serv_type,
'enabled': enabled,
- "description": description,
+ 'description': description,
}
body = json.dumps({'service': body_dict})
resp, body = self.post("services", body)
@@ -73,3 +73,9 @@
resp, body = self.delete(url)
self.expected_success(204, resp.status)
return resp, body
+
+ def list_services(self):
+ resp, body = self.get('services')
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body['services']
diff --git a/tempest/services/identity/v3/xml/credentials_client.py b/tempest/services/identity/v3/xml/credentials_client.py
index 3c44188..37513d0 100644
--- a/tempest/services/identity/v3/xml/credentials_client.py
+++ b/tempest/services/identity/v3/xml/credentials_client.py
@@ -60,13 +60,14 @@
type=cred_type, user_id=user_id)
credential.append(blob)
resp, body = self.post('credentials', str(common.Document(credential)))
+ self.expected_success(201, resp.status)
body = self._parse_body(etree.fromstring(body))
body['blob'] = json.loads(body['blob'])
return resp, body
def update_credential(self, credential_id, **kwargs):
"""Updates a credential."""
- resp, body = self.get_credential(credential_id)
+ _, body = self.get_credential(credential_id)
cred_type = kwargs.get('type', body['type'])
access_key = kwargs.get('access_key', body['blob']['access'])
secret_key = kwargs.get('secret_key', body['blob']['secret'])
@@ -83,6 +84,7 @@
credential.append(blob)
resp, body = self.patch('credentials/%s' % credential_id,
str(common.Document(credential)))
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
body['blob'] = json.loads(body['blob'])
return resp, body
@@ -90,6 +92,7 @@
def get_credential(self, credential_id):
"""To GET Details of a credential."""
resp, body = self.get('credentials/%s' % credential_id)
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
body['blob'] = json.loads(body['blob'])
return resp, body
@@ -97,10 +100,12 @@
def list_credentials(self):
"""Lists out all the available credentials."""
resp, body = self.get('credentials')
+ self.expected_success(200, resp.status)
body = self._parse_creds(etree.fromstring(body))
return resp, body
def delete_credential(self, credential_id):
"""Deletes a credential."""
resp, body = self.delete('credentials/%s' % credential_id)
+ self.expected_success(204, resp.status)
return resp, body
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index 6490e34..892fb58 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -65,6 +65,7 @@
def list_endpoints(self):
"""Get the list of endpoints."""
resp, body = self.get("endpoints")
+ self.expected_success(200, resp.status)
body = self._parse_array(etree.fromstring(body))
return resp, body
@@ -90,6 +91,7 @@
enabled=enabled)
resp, body = self.post('endpoints',
str(common.Document(create_endpoint)))
+ self.expected_success(201, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -120,10 +122,12 @@
endpoint.add_attr("enabled", str(enabled).lower())
resp, body = self.patch('endpoints/%s' % str(endpoint_id), str(doc))
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
def delete_endpoint(self, endpoint_id):
"""Delete endpoint."""
resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+ self.expected_success(204, resp_header.status)
return resp_header, resp_body
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 5b761b3..5c43692 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -590,6 +590,7 @@
auth.append(scope)
resp, body = self.post(self.auth_url, body=str(common.Document(auth)))
+ self.expected_success(201, resp.status)
return resp, body
def request(self, method, url, extra_headers=False, headers=None,
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index 73d831b..41bbfe5 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -67,12 +67,14 @@
create_policy = common.Element("policy", xmlns=XMLNS,
blob=blob, type=type)
resp, body = self.post('policies', str(common.Document(create_policy)))
+ self.expected_success(201, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
def list_policies(self):
"""Lists the policies."""
resp, body = self.get('policies')
+ self.expected_success(200, resp.status)
body = self._parse_array(etree.fromstring(body))
return resp, body
@@ -80,20 +82,23 @@
"""Lists out the given policy."""
url = 'policies/%s' % policy_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
def update_policy(self, policy_id, **kwargs):
"""Updates a policy."""
- resp, body = self.get_policy(policy_id)
type = kwargs.get('type')
update_policy = common.Element("policy", xmlns=XMLNS, type=type)
url = 'policies/%s' % policy_id
resp, body = self.patch(url, str(common.Document(update_policy)))
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
def delete_policy(self, policy_id):
"""Deletes the policy."""
url = "policies/%s" % policy_id
- return self.delete(url)
+ resp, body = self.delete(url)
+ self.expected_success(204, resp.status)
+ return resp, body
diff --git a/tempest/services/identity/v3/xml/region_client.py b/tempest/services/identity/v3/xml/region_client.py
index f854138..7669678 100644
--- a/tempest/services/identity/v3/xml/region_client.py
+++ b/tempest/services/identity/v3/xml/region_client.py
@@ -79,6 +79,7 @@
else:
resp, body = self.post('regions',
str(common.Document(create_region)))
+ self.expected_success(201, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -95,6 +96,7 @@
resp, body = self.patch('regions/%s' % str(region_id),
str(common.Document(update_region)))
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -102,6 +104,7 @@
"""Get Region."""
url = 'regions/%s' % region_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -111,10 +114,12 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = self._parse_array(etree.fromstring(body))
return resp, body
def delete_region(self, region_id):
"""Delete region."""
resp, body = self.delete('regions/%s' % region_id)
+ self.expected_success(204, resp.status)
return resp, body
diff --git a/tempest/services/identity/v3/xml/service_client.py b/tempest/services/identity/v3/xml/service_client.py
index 3beeb89..14adfac 100644
--- a/tempest/services/identity/v3/xml/service_client.py
+++ b/tempest/services/identity/v3/xml/service_client.py
@@ -37,6 +37,14 @@
data = common.xml_to_json(body)
return data
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "service":
+ array.append(common.xml_to_json(child))
+ return array
+
def update_service(self, service_id, **kwargs):
"""Updates a service_id."""
resp, body = self.get_service(service_id)
@@ -79,3 +87,9 @@
resp, body = self.delete(url)
self.expected_success(204, resp.status)
return resp, body
+
+ def list_services(self):
+ resp, body = self.get('services')
+ self.expected_success(200, resp.status)
+ body = self._parse_array(etree.fromstring(body))
+ return resp, body
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index 4ada46c..eaf9390 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -157,6 +157,7 @@
auth = xml.Element('auth', **auth_kwargs)
auth.append(passwordCreds)
resp, body = self.post(self.auth_url, body=str(xml.Document(auth)))
+ self.expected_success(200, resp.status)
return resp, body['access']
def auth_token(self, token_id, tenant=None):
@@ -167,4 +168,5 @@
auth = xml.Element('auth', **auth_kwargs)
auth.append(tokenCreds)
resp, body = self.post(self.auth_url, body=str(xml.Document(auth)))
+ self.expected_success(200, resp.status)
return resp, body['access']
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index d3867cd..15306a0 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -62,6 +62,7 @@
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
self.expected_success(201, resp.status)
+ body = json.loads(body)
return resp, body
def update_stack(self, stack_identifier, name, disable_rollback=True,
diff --git a/tempest/services/volume/json/qos_client.py b/tempest/services/volume/json/qos_client.py
new file mode 100644
index 0000000..6e0bee9
--- /dev/null
+++ b/tempest/services/volume/json/qos_client.py
@@ -0,0 +1,161 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import time
+
+from tempest.common import rest_client
+from tempest import config
+from tempest import exceptions
+
+CONF = config.CONF
+
+
+class BaseQosSpecsClientJSON(rest_client.RestClient):
+ """Client class to send CRUD QoS API requests"""
+
+ def __init__(self, auth_provider):
+ super(BaseQosSpecsClientJSON, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+ self.build_interval = CONF.volume.build_interval
+ self.build_timeout = CONF.volume.build_timeout
+
+ def is_resource_deleted(self, qos_id):
+ try:
+ self.get_qos(qos_id)
+ except exceptions.NotFound:
+ return True
+ return False
+
+ def wait_for_qos_operations(self, qos_id, operation, args=None):
+ """Waits for a qos operations to be completed.
+
+ NOTE : operation value is required for wait_for_qos_operations()
+ operation = 'qos-key' / 'disassociate' / 'disassociate-all'
+ args = keys[] when operation = 'qos-key'
+ args = volume-type-id disassociated when operation = 'disassociate'
+ args = None when operation = 'disassociate-all'
+ """
+ start_time = int(time.time())
+ while True:
+ if operation == 'qos-key-unset':
+ resp, body = self.get_qos(qos_id)
+ self.expected_success(200, resp.status)
+ if not any(key in body['specs'] for key in args):
+ return
+ elif operation == 'disassociate':
+ resp, body = self.get_association_qos(qos_id)
+ self.expected_success(200, resp.status)
+ if not any(args in body[i]['id'] for i in range(0, len(body))):
+ return
+ elif operation == 'disassociate-all':
+ resp, body = self.get_association_qos(qos_id)
+ self.expected_success(200, resp.status)
+ if not body:
+ return
+ else:
+ msg = (" operation value is either not defined or incorrect.")
+ raise exceptions.UnprocessableEntity(msg)
+
+ if int(time.time()) - start_time >= self.build_timeout:
+ raise exceptions.TimeoutException
+ time.sleep(self.build_interval)
+
+ def create_qos(self, name, consumer, **kwargs):
+ """Create a QoS Specification.
+
+ name : name of the QoS specification
+ consumer : consumer of QoS (front-end / back-end / both)
+ """
+ post_body = {'name': name, 'consumer': consumer}
+ post_body.update(kwargs)
+ post_body = json.dumps({'qos_specs': post_body})
+ resp, body = self.post('qos-specs', post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body['qos_specs']
+
+ def delete_qos(self, qos_id, force=False):
+ """Delete the specified QoS specification."""
+ resp, body = self.delete(
+ "qos-specs/%s?force=%s" % (str(qos_id), force))
+ self.expected_success(202, resp.status)
+
+ def list_qos(self):
+ """List all the QoS specifications created."""
+ url = 'qos-specs'
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return resp, body['qos_specs']
+
+ def get_qos(self, qos_id):
+ """Get the specified QoS specification."""
+ url = "qos-specs/%s" % str(qos_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return resp, body['qos_specs']
+
+ def set_qos_key(self, qos_id, **kwargs):
+ """Set the specified keys/values of QoS specification.
+
+ kwargs : it is the dictionary of the key=value pairs to set
+ """
+ put_body = json.dumps({"qos_specs": kwargs})
+ resp, body = self.put('qos-specs/%s' % qos_id, put_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return resp, body['qos_specs']
+
+ def unset_qos_key(self, qos_id, keys):
+ """Unset the specified keys of QoS specification.
+
+ keys : it is the array of the keys to unset
+ """
+ put_body = json.dumps({'keys': keys})
+ resp, _ = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
+ self.expected_success(202, resp.status)
+
+ def associate_qos(self, qos_id, vol_type_id):
+ """Associate the specified QoS with specified volume-type."""
+ url = "qos-specs/%s/associate" % str(qos_id)
+ url += "?vol_type_id=%s" % vol_type_id
+ resp, _ = self.get(url)
+ self.expected_success(202, resp.status)
+
+ def get_association_qos(self, qos_id):
+ """Get the association of the specified QoS specification."""
+ url = "qos-specs/%s/associations" % str(qos_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return resp, body['qos_associations']
+
+ def disassociate_qos(self, qos_id, vol_type_id):
+ """Disassociate the specified QoS with specified volume-type."""
+ url = "qos-specs/%s/disassociate" % str(qos_id)
+ url += "?vol_type_id=%s" % vol_type_id
+ resp, _ = self.get(url)
+ self.expected_success(202, resp.status)
+
+ def disassociate_all_qos(self, qos_id):
+ """Disassociate the specified QoS with all associations."""
+ url = "qos-specs/%s/disassociate_all" % str(qos_id)
+ resp, _ = self.get(url)
+ self.expected_success(202, resp.status)
+
+
+class QosSpecsClientJSON(BaseQosSpecsClientJSON):
+ """Volume V1 QoS client."""
diff --git a/tempest/services/volume/v2/json/qos_client.py b/tempest/services/volume/v2/json/qos_client.py
new file mode 100644
index 0000000..a734df8
--- /dev/null
+++ b/tempest/services/volume/v2/json/qos_client.py
@@ -0,0 +1,23 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.json import qos_client
+
+
+class QosSpecsV2ClientJSON(qos_client.BaseQosSpecsClientJSON):
+
+ def __init__(self, auth_provider):
+ super(QosSpecsV2ClientJSON, self).__init__(auth_provider)
+
+ self.api_version = "v2"
diff --git a/tempest/test.py b/tempest/test.py
index f34933e..d2b32d4 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -76,7 +76,10 @@
f(cls)
except Exception as se:
etype, value, trace = sys.exc_info()
- LOG.exception("setUpClass failed: %s" % se)
+ if etype is cls.skipException:
+ LOG.info("setUpClass skipped: %s:" % se)
+ else:
+ LOG.exception("setUpClass failed: %s" % se)
try:
cls.tearDownClass()
except Exception as te:
@@ -89,12 +92,7 @@
return decorator
-def services(*args, **kwargs):
- """A decorator used to set an attr for each service used in a test case
-
- This decorator applies a testtools attr for each service that gets
- exercised by a test case.
- """
+def get_service_list():
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
@@ -110,16 +108,29 @@
'telemetry': CONF.service_available.ceilometer,
'data_processing': CONF.service_available.sahara
}
+ return service_list
+
+def services(*args, **kwargs):
+ """A decorator used to set an attr for each service used in a test case
+
+ This decorator applies a testtools attr for each service that gets
+ exercised by a test case.
+ """
def decorator(f):
+ services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
+ 'network', 'identity', 'object_storage', 'dashboard',
+ 'ceilometer', 'data_processing']
for service in args:
- if service not in service_list:
- raise exceptions.InvalidServiceTag('%s is not a valid service'
- % service)
+ if service not in services:
+ raise exceptions.InvalidServiceTag('%s is not a valid '
+ 'service' % service)
attr(type=list(args))(f)
@functools.wraps(f)
def wrapper(self, *func_args, **func_kwargs):
+ service_list = get_service_list()
+
for service in args:
if not service_list[service]:
msg = 'Skipped because the %s service is not available' % (
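The point of this refactor is that get_service_list() reads CONF only when the wrapped test actually runs, so importing a module that uses @test.services no longer requires a loaded configuration. A minimal sketch of a test using the decorator (class and test names are illustrative):

    from tempest import test

    class TestBootServer(test.BaseTestCase):

        @test.services('compute', 'network')
        def test_boot(self):
            # Skipped at run time if nova or neutron is not available
            # according to CONF.service_available.
            pass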
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index d0140dd..a28684e 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -238,8 +238,8 @@
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
- self.assertEqual(['fake1', 'fake2', 'not_fake'],
- results['neutron']['extensions'])
+ self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+ sorted(results['neutron']['extensions']))
def test_verify_extensions_cinder(self):
def fake_list_extensions():
@@ -277,8 +277,8 @@
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
- self.assertEqual(['fake1', 'fake2', 'not_fake'],
- results['cinder']['extensions'])
+ self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+ sorted(results['cinder']['extensions']))
def test_verify_extensions_nova(self):
def fake_list_extensions():
@@ -316,8 +316,8 @@
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
- self.assertEqual(['fake1', 'fake2', 'not_fake'],
- results['nova']['extensions'])
+ self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+ sorted(results['nova']['extensions']))
def test_verify_extensions_nova_v3(self):
def fake_list_extensions():
@@ -355,8 +355,8 @@
'nova_v3', {})
self.assertIn('nova_v3', results)
self.assertIn('extensions', results['nova_v3'])
- self.assertEqual(['fake1', 'fake2', 'not_fake'],
- results['nova_v3']['extensions'])
+ self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
+ sorted(results['nova_v3']['extensions']))
def test_verify_extensions_swift(self):
def fake_list_extensions():
@@ -395,5 +395,5 @@
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
- self.assertEqual(['not_fake', 'fake1', 'fake2'],
- results['swift']['extensions'])
+ self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
+ sorted(results['swift']['extensions']))
diff --git a/tempest/tests/common/test_accounts.py b/tempest/tests/common/test_accounts.py
index c24bfb6..a0b3496 100644
--- a/tempest/tests/common/test_accounts.py
+++ b/tempest/tests/common/test_accounts.py
@@ -185,3 +185,48 @@
hash_list[2])
remove_mock.mock.assert_called_once_with(hash_path)
rmdir_mock.mock.assert_not_called()
+
+ def test_is_multi_user(self):
+ test_accounts_class = accounts.Accounts('test_name')
+ self.assertTrue(test_accounts_class.is_multi_user())
+
+ def test_is_not_multi_user(self):
+ self.test_accounts = [self.test_accounts[0]]
+ self.useFixture(mockpatch.Patch(
+ 'tempest.common.accounts.read_accounts_yaml',
+ return_value=self.test_accounts))
+ test_accounts_class = accounts.Accounts('test_name')
+ self.assertFalse(test_accounts_class.is_multi_user())
+
+
+class TestNotLockingAccount(base.TestCase):
+
+ def setUp(self):
+ super(TestNotLockingAccount, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+ self.temp_dir = tempfile.mkdtemp()
+ cfg.CONF.set_default('lock_path', self.temp_dir)
+ self.addCleanup(os.rmdir, self.temp_dir)
+ self.test_accounts = [
+ {'username': 'test_user1', 'tenant_name': 'test_tenant1',
+ 'password': 'p'},
+ {'username': 'test_user2', 'tenant_name': 'test_tenant2',
+ 'password': 'p'},
+ {'username': 'test_user3', 'tenant_name': 'test_tenant3',
+ 'password': 'p'},
+ ]
+ self.useFixture(mockpatch.Patch(
+ 'tempest.common.accounts.read_accounts_yaml',
+ return_value=self.test_accounts))
+ cfg.CONF.set_default('test_accounts_file', '', group='auth')
+
+ def test_get_creds(self):
+ test_accounts_class = accounts.NotLockingAccounts('test_name')
+ for i in xrange(len(self.test_accounts)):
+ creds = test_accounts_class.get_creds(i)
+ msg = "Empty credentials returned for ID %s" % str(i)
+ self.assertIsNotNone(creds, msg)
+ self.assertRaises(exceptions.InvalidConfiguration,
+ test_accounts_class.get_creds,
+ id=len(self.test_accounts))
\ No newline at end of file
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
index 9da5f92..ea576c4 100644
--- a/tempest/tests/test_credentials.py
+++ b/tempest/tests/test_credentials.py
@@ -128,12 +128,22 @@
creds = self._get_credentials()
self.assertTrue(creds.is_valid())
- def test_is_not_valid(self):
+ def _test_is_not_valid(self, ignore_key):
creds = self._get_credentials()
for attr in self.attributes.keys():
+ if attr == ignore_key:
+ continue
+ temp_attr = getattr(creds, attr)
delattr(creds, attr)
self.assertFalse(creds.is_valid(),
"Credentials should be invalid without %s" % attr)
+ setattr(creds, attr, temp_attr)
+
+ def test_is_not_valid(self):
+ # NOTE(mtreinish): A KeystoneV2 credential object is valid without
+ # a tenant_name. So skip that check. See tempest.auth for the valid
+ # credential requirements
+ self._test_is_not_valid('tenant_name')
def test_default(self):
self.useFixture(fixtures.LockFixture('auth_version'))
@@ -205,6 +215,12 @@
config_value = 'fake_' + attr
self.assertEqual(getattr(creds, attr), config_value)
+ def test_is_not_valid(self):
+ # NOTE(mtreinish): For a Keystone V3 credential object a project_name
+ # is not required for the credentials to be valid, so we skip that check.
+ # See tempest.auth for the valid credential requirements
+ self._test_is_not_valid('project_name')
+
def test_synced_attributes(self):
attributes = self.attributes
# Create V3 credentials with tenant instead of project, and user_domain
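
The reworked _test_is_not_valid helper above removes one attribute at a time and restores it before moving on, so every iteration checks a credential that is missing exactly one field rather than an ever-growing set of them. The delete/assert/restore pattern in isolation, shown with a toy object rather than the tempest credentials class:

    class Creds(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def is_valid(self):
            return all(getattr(self, a, None) for a in ('username', 'password'))

    creds = Creds(username='u', password='p')
    for attr in ('username', 'password'):
        saved = getattr(creds, attr)
        delattr(creds, attr)
        assert not creds.is_valid(), "should be invalid without %s" % attr
        # Restore the attribute so the next pass is missing only one field.
        setattr(creds, attr, saved)
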
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index eddbb1d..48c523e 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -272,6 +272,13 @@
@mock.patch('tempest.common.rest_client.RestClient')
def test_network_cleanup(self, MockRestClient):
+ def side_effect(**args):
+ return ({'status': 200},
+ {"security_groups": [{"tenant_id": args['tenant_id'],
+ "name": args['name'],
+ "description": args['name'],
+ "security_group_rules": [],
+ "id": "sg-%s" % args['tenant_id']}]})
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
# Create primary tenant and network
@@ -341,7 +348,23 @@
return_value=return_values)
port_list_mock.start()
+ secgroup_list_mock = mock.patch.object(iso_creds.network_admin_client,
+ 'list_security_groups',
+ side_effect=side_effect)
+ secgroup_list_mock.start()
+
+ return_values = (fake_http.fake_httplib({}, status=204), {})
+ remove_secgroup_mock = self.patch(
+ 'tempest.services.network.network_client_base.'
+ 'NetworkClientBase.delete', return_value=return_values)
iso_creds.clear_isolated_creds()
+ # Verify default security group delete
+ calls = remove_secgroup_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('v2.0/security-groups/sg-1234', args)
+ self.assertIn('v2.0/security-groups/sg-12345', args)
+ self.assertIn('v2.0/security-groups/sg-123456', args)
# Verify remove router interface calls
calls = remove_router_interface_mock.mock_calls
self.assertEqual(len(calls), 3)
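
The tenant-isolation hunks above fabricate one default security group per tenant through a side_effect callable and then check that clear_isolated_creds issued a DELETE for each generated sg-<tenant_id>. The core mock pattern, stripped down and with placeholder names rather than the real tempest client API (the real side_effect also returns a response/status tuple):

    import mock

    client = mock.Mock()
    # Each call fabricates a security group derived from the tenant_id argument.
    client.list_security_groups.side_effect = lambda **kw: {
        'security_groups': [{'id': 'sg-%s' % kw['tenant_id'],
                             'name': kw.get('name', 'default')}]}

    deleted = []
    for tenant_id in ('1234', '12345', '123456'):
        body = client.list_security_groups(tenant_id=tenant_id, name='default')
        deleted.append('v2.0/security-groups/%s'
                       % body['security_groups'][0]['id'])

    assert 'v2.0/security-groups/sg-1234' in deleted
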
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index bba4012..3f4ac7d 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -69,14 +69,14 @@
self.assertEqual(
p.returncode, expected,
- "Stdout: %s; Stderr: %s" % (p.stdout, p.stderr))
+ "Stdout: %s; Stderr: %s" % (p.stdout.read(), p.stderr.read()))
def test_pretty_tox(self):
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work. so make the test directory a git repo
# too.
subprocess.call(['git', 'init'], stderr=DEVNULL)
- self.assertRunExit('pretty_tox.sh tests.passing', 0)
+ self.assertRunExit('pretty_tox.sh passing', 0)
def test_pretty_tox_fails(self):
# Git init is required for the pbr testr command. pbr requires a git
@@ -86,7 +86,7 @@
self.assertRunExit('pretty_tox.sh', 1)
def test_pretty_tox_serial(self):
- self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)
+ self.assertRunExit('pretty_tox_serial.sh passing', 0)
def test_pretty_tox_serial_fails(self):
self.assertRunExit('pretty_tox_serial.sh', 1)
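
The test_wrappers fix above calls .read() on the Popen pipe objects so a failing assertion shows the captured output instead of the file objects' repr. A small illustration, assuming stdout and stderr are piped:

    import subprocess

    p = subprocess.Popen(['echo', 'hello'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    # Interpolating p.stdout itself only yields something like
    # "<open file '<fdopen>', ...>"; .read() yields the captured text
    # (bytes on Python 3).
    print("Stdout: %r; Stderr: %r" % (p.stdout.read(), p.stderr.read()))

For anything beyond the trivial output here, Popen.communicate() is the usual way to drain both pipes without risking a deadlock.
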
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 4bf71f3..f94d880 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -108,8 +108,8 @@
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
- """:returns: Retruns with an error string if not matches,
- returns with None when matches.
+ """:returns: Returns with an error string if it does not match,
+ returns with None when it matches.
"""
if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
@@ -485,7 +485,7 @@
@classmethod
def destroy_volume_wait(cls, volume):
- """Delete volume, tryies to detach first.
+ """Delete volume, tries to detach first.
Use just for teardown!
"""
exc_num = 0
@@ -518,7 +518,7 @@
@classmethod
def destroy_snapshot_wait(cls, snapshot):
- """delete snaphot, wait until not exists."""
+ """delete snapshot, wait until it ceases to exist."""
snapshot.delete()
def _update():
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 2c68d6b..c0d3f7a 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from boto import exception
-
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
@@ -82,6 +80,13 @@
raise exceptions.EC2RegisterImageException(
image_id=image["image_id"])
+ def _terminate_reservation(self, reservation, rcuk):
+ for instance in reservation.instances:
+ instance.terminate()
+ for instance in reservation.instances:
+ self.assertInstanceStateWait(instance, '_GONE')
+ self.cancelResourceCleanUp(rcuk)
+
def test_run_idempotent_instances(self):
# EC2 run instances idempotently
@@ -96,11 +101,6 @@
reservation)
return (reservation, rcuk)
- def _terminate_reservation(reservation, rcuk):
- for instance in reservation.instances:
- instance.terminate()
- self.cancelResourceCleanUp(rcuk)
-
reservation_1, rcuk_1 = _run_instance('token_1')
reservation_2, rcuk_2 = _run_instance('token_2')
reservation_1a, rcuk_1a = _run_instance('token_1')
@@ -116,8 +116,8 @@
# handled by rcuk1
self.cancelResourceCleanUp(rcuk_1a)
- _terminate_reservation(reservation_1, rcuk_1)
- _terminate_reservation(reservation_2, rcuk_2)
+ self._terminate_reservation(reservation_1, rcuk_1)
+ self._terminate_reservation(reservation_2, rcuk_2)
def test_run_stop_terminate_instance(self):
# EC2 run, stop and terminate instance
@@ -139,9 +139,7 @@
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
- for instance in reservation.instances:
- instance.terminate()
- self.cancelResourceCleanUp(rcuk)
+ self._terminate_reservation(reservation, rcuk)
def test_run_stop_terminate_instance_with_tags(self):
# EC2 run, stop and terminate instance with tags
@@ -188,9 +186,7 @@
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
- for instance in reservation.instances:
- instance.terminate()
- self.cancelResourceCleanUp(rcuk)
+ self._terminate_reservation(reservation, rcuk)
def test_run_terminate_instance(self):
# EC2 run, terminate immediately
@@ -202,18 +198,7 @@
for instance in reservation.instances:
instance.terminate()
- try:
- instance.update(validate=True)
- except ValueError:
- pass
- except exception.EC2ResponseError as exc:
- if self.ec2_error_code.\
- client.InvalidInstanceID.NotFound.match(exc) is None:
- pass
- else:
- raise
- else:
- self.assertNotEqual(instance.state, "running")
+ self.assertInstanceStateWait(instance, '_GONE')
def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
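
The hunks above promote the teardown loop to a _terminate_reservation method that terminates every instance first and then waits for each one to reach the synthetic '_GONE' state, replacing the open-coded try/except around instance.update(). A rough sketch of that terminate-then-wait shape, with the waiter as a placeholder for assertInstanceStateWait:

    def terminate_reservation(reservation, cancel_cleanup, wait_for_state):
        # Start termination for every instance before waiting, so the waits
        # overlap instead of running strictly one after another.
        for instance in reservation.instances:
            instance.terminate()
        for instance in reservation.instances:
            wait_for_state(instance, '_GONE')
        # The reservation is gone; its queued cleanup is no longer needed.
        cancel_cleanup()
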
diff --git a/tools/check_logs.py b/tools/check_logs.py
index e5d26f7..7cf9d85 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -26,8 +26,9 @@
import yaml
-is_grenade = (os.environ.get('DEVSTACK_GATE_GRENADE', "0") == "1" or
- os.environ.get('DEVSTACK_GATE_GRENADE_FORWARD', "0") == "1")
+# DEVSTACK_GATE_GRENADE is unset when grenade is not running, or set to
+# a string describing the type of grenade run to perform.
+is_grenade = os.environ.get('DEVSTACK_GATE_GRENADE') is not None
dump_all_errors = True
# As logs are made clean, add to this set
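
The check_logs.py change above treats any value of DEVSTACK_GATE_GRENADE as "grenade is running" rather than matching the two legacy flag variables against "1". The difference in behaviour, using a hypothetical grenade mode string:

    import os

    # Hypothetical gate setting; the old check only recognized the value "1".
    os.environ['DEVSTACK_GATE_GRENADE'] = 'pullup'
    old_style = (os.environ.get('DEVSTACK_GATE_GRENADE', "0") == "1" or
                 os.environ.get('DEVSTACK_GATE_GRENADE_FORWARD', "0") == "1")
    new_style = os.environ.get('DEVSTACK_GATE_GRENADE') is not None
    print(old_style, new_style)  # False True
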
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
index 8ad59bb..57e58f2 100755
--- a/tools/subunit-trace.py
+++ b/tools/subunit-trace.py
@@ -23,7 +23,6 @@
import re
import sys
-import mimeparse
import subunit
import testtools
@@ -32,55 +31,6 @@
RESULTS = {}
-class Starts(testtools.StreamResult):
-
- def __init__(self, output):
- super(Starts, self).__init__()
- self._output = output
-
- def startTestRun(self):
- self._neednewline = False
- self._emitted = set()
-
- def status(self, test_id=None, test_status=None, test_tags=None,
- runnable=True, file_name=None, file_bytes=None, eof=False,
- mime_type=None, route_code=None, timestamp=None):
- super(Starts, self).status(
- test_id, test_status,
- test_tags=test_tags, runnable=runnable, file_name=file_name,
- file_bytes=file_bytes, eof=eof, mime_type=mime_type,
- route_code=route_code, timestamp=timestamp)
- if not test_id:
- if not file_bytes:
- return
- if not mime_type or mime_type == 'test/plain;charset=utf8':
- mime_type = 'text/plain; charset=utf-8'
- primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
- content_type = testtools.content_type.ContentType(
- primary, sub, parameters)
- content = testtools.content.Content(
- content_type, lambda: [file_bytes])
- text = content.as_text()
- if text and text[-1] not in '\r\n':
- self._neednewline = True
- self._output.write(text)
- elif test_status == 'inprogress' and test_id not in self._emitted:
- if self._neednewline:
- self._neednewline = False
- self._output.write('\n')
- worker = ''
- for tag in test_tags or ():
- if tag.startswith('worker-'):
- worker = '(' + tag[7:] + ') '
- if timestamp:
- timestr = timestamp.isoformat()
- else:
- timestr = ''
- self._output.write('%s: %s%s [start]\n' %
- (timestr, worker, test_id))
- self._emitted.add(test_id)
-
-
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
@@ -274,17 +224,19 @@
args = parse_args()
stream = subunit.ByteStreamToStreamResult(
sys.stdin, non_subunit_name='stdout')
- starts = Starts(sys.stdout)
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures))
summary = testtools.StreamSummary()
- result = testtools.CopyStreamResult([starts, outcomes, summary])
+ result = testtools.CopyStreamResult([outcomes, summary])
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
+ if count_tests('status', '.*') == 0:
+ print("The test run didn't actually run any tests")
+ return 1
if args.post_fails:
print_fails(sys.stdout)
print_summary(sys.stdout)
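
With the Starts writer removed, subunit-trace keeps only the outcome and summary consumers, and it now fails the run when the stream contained no tests at all. A minimal sketch of the same pipeline shape, counting tests with a local list instead of the script's count_tests helper (assumes python-subunit and testtools are installed):

    import functools
    import sys

    import subunit
    import testtools

    seen = []

    def show_outcome(stream, test):
        # 'test' is a dict with keys such as 'id', 'status' and 'details'.
        seen.append(test['id'])
        stream.write('%s: %s\n' % (test['id'], test['status']))

    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(functools.partial(show_outcome, sys.stdout))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    if not seen:
        print("The test run didn't actually run any tests")
        sys.exit(1)
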
diff --git a/tox.ini b/tox.ini
index a071d4b..492c4f6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,29 +6,26 @@
[testenv]
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
- PYTHONHASHSEED=0
usedevelop = True
install_command = pip install -U {opts} {packages}
[testenv:py26]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:py33]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:py27]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:cover]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
+deps = -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
[testenv:all]
sitepackages = True
@@ -94,6 +91,7 @@
-r{toxinidir}/test-requirements.txt
[testenv:pep8]
+setenv = PYTHONHASHSEED=0
commands =
flake8 {posargs}
{toxinidir}/tools/config/check_uptodate.sh
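
Dropping PYTHONHASHSEED=0 from the unit-test environments (it now remains only for pep8) relies on the assertions above no longer assuming a fixed dict/set ordering. A quick illustration of what hash randomization perturbs between interpreter runs:

    # On interpreters with hash randomization active (Python 3.3+ by default,
    # or Python 2.7 run with -R), try e.g.:
    #   PYTHONHASHSEED=1 python demo.py
    #   PYTHONHASHSEED=2 python demo.py
    extensions = {'not_fake', 'fake1', 'fake2'}
    print(list(extensions))    # order may differ between the two runs
    print(sorted(extensions))  # stable, which is what the tests now compare
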