Merge "Add environmental variables to test.conf"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 033bc82..356bb42 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,3 +1,6 @@
+[DEFAULT]
+# log_config = /opt/stack/tempest/etc/logging.conf.sample
+
[identity]
# This section contains configuration options that a variety of Tempest
# test clients use when authenticating with different user/tenant
diff --git a/openstack-common.conf b/openstack-common.conf
index 24af119..8568f22 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,9 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=install_venv_common
+module=install_venv_common
+module=lockutils
+module=log
# The base module to hold the copy of openstack.common
base=tempest
diff --git a/tempest/api/compute/__init__.py b/tempest/api/compute/__init__.py
index fb96b4a..fd26081 100644
--- a/tempest/api/compute/__init__.py
+++ b/tempest/api/compute/__init__.py
@@ -15,9 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest import config
from tempest.exceptions import InvalidConfiguration
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index a47e6c9..849cebb 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -48,7 +48,7 @@
resp, hosts = self.client.list_hosts(params)
self.assertEqual(200, resp.status)
self.assertTrue(len(hosts) >= 1)
- self.assertTrue(host in hosts)
+ self.assertIn(host, hosts)
@attr(type='negative')
def test_list_hosts_with_non_existent_zone(self):
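A recurring change in this patch is replacing assertTrue(x in y) with assertIn(x, y); the dedicated assertion reports both the missing item and the container on failure instead of just "False is not true". A minimal illustrative sketch (not part of the patch), using a throwaway test case:

import unittest


class MembershipAssertionDemo(unittest.TestCase):
    def test_membership(self):
        hosts = ['host-a', 'host-b']
        # assertTrue('host-c' in hosts) would fail with only "False is not
        # true"; assertIn/assertNotIn name both operands in the failure.
        self.assertIn('host-a', hosts)
        self.assertNotIn('host-c', hosts)


if __name__ == '__main__':
    unittest.main()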
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index a6b4e31..3e98029 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -44,13 +44,13 @@
cls.demo_tenant_id = [tnt['id'] for tnt in tenants if tnt['name']
== cls.config.identity.tenant_name][0]
- cls.default_quota_set = {'injected_file_content_bytes': 10240,
- 'metadata_items': 128, 'injected_files': 5,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'key_pairs': 100,
- 'injected_file_path_bytes': 255,
- 'instances': 10, 'security_group_rules': 20,
- 'cores': 20, 'security_groups': 10}
+ cls.default_quota_set = set(('injected_file_content_bytes',
+ 'metadata_items', 'injected_files',
+ 'ram', 'floating_ips',
+ 'fixed_ips', 'key_pairs',
+ 'injected_file_path_bytes',
+ 'instances', 'security_group_rules',
+ 'cores', 'security_groups'))
@classmethod
def tearDownClass(cls):
@@ -64,12 +64,13 @@
@attr(type='smoke')
def test_get_default_quotas(self):
# Admin can get the default resource quota set for a tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.demo_tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(
self.demo_tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.demo_tenant_id)
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type='gate')
@@ -105,24 +106,23 @@
@attr(type='gate')
def test_get_updated_quotas(self):
# Verify that GET shows the updated quota set
- self.adm_client.update_quota_set(self.demo_tenant_id,
- ram='5120')
- self.addCleanup(self.adm_client.update_quota_set,
- self.demo_tenant_id, **self.default_quota_set)
- try:
- resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
- self.assertEqual(200, resp.status)
- self.assertEqual(quota_set['ram'], 5120)
- except Exception:
- self.fail("Could not get the update quota limit for resource")
- finally:
- # Reset quota resource limits to default values
- resp, quota_set = self.adm_client.update_quota_set(
- self.demo_tenant_id,
- **self.default_quota_set)
- self.assertEqual(200, resp.status, "Failed to reset quota "
- "defaults")
+ tenant_name = rand_name('cpu_quota_tenant_')
+ tenant_desc = tenant_name + '-desc'
+ identity_client = self.os_adm.identity_client
+ _, tenant = identity_client.create_tenant(name=tenant_name,
+ description=tenant_desc)
+ tenant_id = tenant['id']
+ self.addCleanup(identity_client.delete_tenant,
+ tenant_id)
+ self.adm_client.update_quota_set(tenant_id,
+ ram='5120')
+ resp, quota_set = self.adm_client.get_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(quota_set['ram'], 5120)
+
+    #TODO(afazekas): Add dedicated tenant to the skipped quota tests
+ # it can be moved into the setUpClass as well
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type='gate')
def test_create_server_when_cpu_quota_is_full(self):
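The quota tests above no longer pin the numeric default quota values; they compare only the set of returned keys plus the tenant id, since deployments may override the defaults. A standalone sketch of that comparison pattern, using hypothetical values:

# Hypothetical quota response; only the keys and the tenant id are asserted,
# because the numeric limits are deployment-specific.
DEFAULT_QUOTA_KEYS = set(('ram', 'cores', 'instances', 'key_pairs'))


def check_quota_keys(quota_set, tenant_id):
    expected = DEFAULT_QUOTA_KEYS | set(['id'])
    assert sorted(expected) == sorted(quota_set.keys())
    assert quota_set['id'] == tenant_id


check_quota_keys({'ram': 51200, 'cores': 20, 'instances': 10,
                  'key_pairs': 100, 'id': 'demo-tenant-id'},
                 'demo-tenant-id')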
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 3b3c6ce..ce16353 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -64,7 +64,12 @@
service['host'] == host_name]
params = {'host': host_name}
resp, services = self.client.list_services(params)
- self.assertEqual(services_on_host, services)
+
+        # We could have a periodic job check-in between the two service
+        # lookups, so only compare the binary lists.
+ s1 = map(lambda x: x['binary'], services)
+ s2 = map(lambda x: x['binary'], services_on_host)
+ self.assertEqual(s1, s2)
@attr(type=['negative', 'gate'])
def test_get_service_by_invalid_params(self):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8ba074e..d40b0e0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -19,9 +19,9 @@
from tempest.api import compute
from tempest import clients
-from tempest.common import log as logging
from tempest.common.utils.data_utils import parse_image_id
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
import tempest.test
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 27526eb..51ce20c 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -35,14 +35,14 @@
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
'name': flavor['name']}
- self.assertTrue(flavor_min_detail in flavors)
+ self.assertIn(flavor_min_detail, flavors)
@attr(type='smoke')
def test_list_flavors_with_detail(self):
# Detailed list of all flavors should contain the expected flavor
resp, flavors = self.client.list_flavors_with_detail()
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertTrue(flavor in flavors)
+ self.assertIn(flavor, flavors)
@attr(type='smoke')
def test_get_flavor(self):
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 0d7f26d..a8ac7de 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -68,7 +68,7 @@
self.client.get_floating_ip_details(floating_ip_id_allocated)
#Checking if the details of allocated IP is in list of floating IP
resp, body = self.client.list_floating_ips()
- self.assertTrue(floating_ip_details in body)
+ self.assertIn(floating_ip_details, body)
finally:
#Deleting the floating IP which is created in this method
self.client.delete_floating_ip(floating_ip_id_allocated)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 3e1aa82..7e4e833 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -51,7 +51,7 @@
self.assertNotEqual(0, len(floating_ips),
"Expected floating IPs. Got zero.")
for i in range(3):
- self.assertTrue(self.floating_ip[i] in floating_ips)
+ self.assertIn(self.floating_ip[i], floating_ips)
@attr(type='gate')
def test_get_floating_ip_details(self):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index b27d710..fb2906a 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -16,9 +16,9 @@
# under the License.
from tempest.api.compute import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import parse_image_id
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 6abca3f..3bcf7b4 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -87,8 +87,8 @@
try:
resp, keypair_detail = self.client.get_keypair(k_name)
self.assertEqual(200, resp.status)
- self.assertTrue('name' in keypair_detail)
- self.assertTrue('public_key' in keypair_detail)
+ self.assertIn('name', keypair_detail)
+ self.assertIn('public_key', keypair_detail)
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index ab100a3..68be206 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -71,13 +71,13 @@
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
- self.assertTrue('id' in securitygroup)
+ self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
self.addCleanup(self._delete_security_group,
securitygroup_id)
self.assertEqual(200, resp.status)
self.assertFalse(securitygroup_id is None)
- self.assertTrue('name' in securitygroup)
+ self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
@@ -94,7 +94,7 @@
securitygroup['id'])
self.assertEqual(200, resp.status)
- self.assertTrue('name' in securitygroup)
+ self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 9fde618..edfafec 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -47,7 +47,7 @@
# reservation_id is not in the response body when the request send
# contains return_reservation_id=False
self.assertEqual('202', resp['status'])
- self.assertFalse('reservation_id' in body)
+ self.assertNotIn('reservation_id', body)
@attr(type=['negative', 'gate'])
def test_min_count_less_than_one(self):
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 291c8e4..4359c49 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -27,7 +27,7 @@
def test_list_extensions(self):
# List of all extensions
resp, extensions = self.extensions_client.list_extensions()
- self.assertTrue("extensions" in extensions)
+ self.assertIn("extensions", extensions)
self.assertEqual(200, resp.status)
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 1a8a40b..8014fca 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -30,31 +30,33 @@
resp, tenants = cls.admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.client.tenant_name][0]
- cls.default_quota_set = {'injected_file_content_bytes': 10240,
- 'metadata_items': 128, 'injected_files': 5,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'key_pairs': 100,
- 'injected_file_path_bytes': 255,
- 'instances': 10, 'security_group_rules': 20,
- 'cores': 20, 'security_groups': 10}
+ cls.default_quota_set = set(('injected_file_content_bytes',
+ 'metadata_items', 'injected_files',
+ 'ram', 'floating_ips',
+ 'fixed_ips', 'key_pairs',
+ 'injected_file_path_bytes',
+ 'instances', 'security_group_rules',
+ 'cores', 'security_groups'))
@attr(type='smoke')
def test_get_quotas(self):
# User can get the quota set for it's tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
@attr(type='smoke')
def test_get_default_quotas(self):
# User can get the default quota set for it's tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
class QuotasTestXML(QuotasTestJSON):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 6571491..e756870 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -91,7 +91,7 @@
linux_client = RemoteClient(server,
self.ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
- self.assertTrue(self.device in partitions)
+ self.assertIn(self.device, partitions)
self._detach(server['id'], volume['id'])
self.attached = False
@@ -105,7 +105,7 @@
linux_client = RemoteClient(server,
self.ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
- self.assertFalse(self.device in partitions)
+ self.assertNotIn(self.device, partitions)
except Exception:
self.fail("The test_attach_detach_volume is faild!")
finally:
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 363cd6a..f2dd93c 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -44,8 +44,8 @@
metadata=metadata)
self.addCleanup(self._delete_volume, volume)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('displayName' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
@@ -80,8 +80,8 @@
metadata={})
self.addCleanup(self._delete_volume, volume)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('displayName' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('displayName', volume)
#Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
#GET Volume
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index f1ef5a4..2ecf3e8 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -32,7 +32,7 @@
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_volume_get_nonexistant_volume_id(self):
# Negative: Should not be able to get details of nonexistant volume
#Creating a nonexistant volume id
@@ -48,7 +48,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_volume_delete_nonexistant_volume_id(self):
# Negative: Should not be able to delete nonexistant Volume
# Creating nonexistant volume id
@@ -64,7 +64,7 @@
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
non_exist_id)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_invalid_size(self):
# Negative: Should not be able to create volume with invalid size
# in request
@@ -73,7 +73,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_out_passing_size(self):
# Negative: Should not be able to create volume without passing size
# in request
@@ -82,7 +82,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_size_zero(self):
# Negative: Should not be able to create volume with size zero
v_name = rand_name('Volume-')
@@ -90,25 +90,25 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_get_invalid_volume_id(self):
# Negative: Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound,
self.client.get_volume, '#$%%&^&^')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_get_volume_without_passing_volume_id(self):
# Negative: Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_delete_invalid_volume_id(self):
# Negative: Should not be able to delete volume when invalid ID is
# passed
self.assertRaises(exceptions.NotFound,
self.client.delete_volume, '!@#$%^&*()')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_delete_volume_without_passing_volume_id(self):
# Negative: Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/identity/__init__.py b/tempest/api/identity/__init__.py
index 718aa15..0ed47f5 100644
--- a/tempest/api/identity/__init__.py
+++ b/tempest/api/identity/__init__.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index 08b86ca..cc112cc 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -73,7 +73,7 @@
# Role should be created, verified, and deleted
role_name = rand_name('role-test-')
resp, body = self.client.create_role(role_name)
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
self.assertEqual(role_name, body['name'])
@@ -82,7 +82,7 @@
self.assertTrue(any(found))
resp, body = self.client.delete_role(found[0]['id'])
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
resp, body = self.client.list_roles()
@@ -100,7 +100,7 @@
role_name = rand_name('role-dup-')
resp, body = self.client.create_role(role_name)
role1_id = body.get('id')
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
self.addCleanup(self.client.delete_role, role1_id)
self.assertRaises(exceptions.Duplicate, self.client.create_role,
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 644853a..a590735 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -37,25 +37,25 @@
name, type, description=description)
self.assertTrue(resp['status'].startswith('2'))
#Verifying response body of create service
- self.assertTrue('id' in service_data)
+ self.assertIn('id', service_data)
self.assertFalse(service_data['id'] is None)
- self.assertTrue('name' in service_data)
+ self.assertIn('name', service_data)
self.assertEqual(name, service_data['name'])
- self.assertTrue('type' in service_data)
+ self.assertIn('type', service_data)
self.assertEqual(type, service_data['type'])
- self.assertTrue('description' in service_data)
+ self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
#Get service
resp, fetched_service = self.client.get_service(service_data['id'])
self.assertTrue(resp['status'].startswith('2'))
#verifying the existence of service created
- self.assertTrue('id' in fetched_service)
+ self.assertIn('id', fetched_service)
self.assertEquals(fetched_service['id'], service_data['id'])
- self.assertTrue('name' in fetched_service)
+ self.assertIn('name', fetched_service)
self.assertEqual(fetched_service['name'], service_data['name'])
- self.assertTrue('type' in fetched_service)
+ self.assertIn('type', fetched_service)
self.assertEqual(fetched_service['type'], service_data['type'])
- self.assertTrue('description' in fetched_service)
+ self.assertIn('description', fetched_service)
self.assertEqual(fetched_service['description'],
service_data['description'])
finally:
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 0bba250..3a20081 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -242,7 +242,7 @@
self.data.users.append(user2)
#List of users for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
- self.assertTrue(resp['status'] in ('200', '203'))
+ self.assertIn(resp['status'], ('200', '203'))
for i in body:
fetched_user_ids.append(i['id'])
#verifying the user Id in the list
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 799b081..681db07 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -67,7 +67,7 @@
update_type = rand_name('UpdatedPolicyType-')
resp, data = self.policy_client.update_policy(
policy['id'], type=update_type)
- self.assertTrue('type' in data)
+ self.assertIn('type', data)
#Assertion for updated value with fetched value
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e27ec13..086c50e 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -15,9 +15,9 @@
# under the License.
from tempest import clients
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import log as logging
import tempest.test
LOG = logging.getLogger(__name__)
@@ -28,11 +28,20 @@
@classmethod
def setUpClass(cls):
- cls.os = clients.Manager()
+ cls.isolated_creds = []
cls.created_images = []
+ cls._interface = 'json'
if not cls.config.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
+ if cls.config.compute.allow_tenant_isolation:
+ creds = cls._get_isolated_creds()
+ username, tenant_name, password = creds
+ cls.os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ else:
+ cls.os = clients.Manager()
@classmethod
def tearDownClass(cls):
@@ -44,6 +53,7 @@
for image_id in cls.created_images:
cls.client.wait_for_resource_deletion(image_id)
+ cls._clear_isolated_creds()
@classmethod
def create_image(cls, **kwargs):
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 640daa5..327df0f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -45,7 +45,7 @@
disk_format='raw',
is_public=True,
properties=properties)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertTrue(body.get('is_public'))
@@ -56,7 +56,7 @@
# Now try uploading an image file
image_file = StringIO.StringIO(('*' * 1024))
resp, body = self.client.update_image(image_id, data=image_file)
- self.assertTrue('size' in body)
+ self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@attr(type='gate')
@@ -69,7 +69,7 @@
'/someimage.iso',
properties={'key1': 'value1',
'key2': 'value2'})
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('active', body.get('status'))
@@ -83,7 +83,7 @@
container_format='bare',
disk_format='raw', is_public=True,
copy_from=self.config.images.http_image)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertTrue(body.get('is_public'))
@@ -101,7 +101,7 @@
is_public=True,
min_ram=40,
properties=properties)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
@@ -184,7 +184,7 @@
self.assertEqual(resp['status'], '200')
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
- self.assertTrue(image_id in image_list)
+ self.assertIn(image_id, image_list)
@attr(type='gate')
def test_index_disk_format(self):
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 34db6e3..7de7821 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -48,13 +48,13 @@
container_format='bare',
disk_format='raw',
visibility='public')
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
- self.assertTrue('name' in body)
+ self.assertIn('name', body)
self.assertEqual('New Name', body.get('name'))
- self.assertTrue('visibility' in body)
+ self.assertIn('visibility', body)
self.assertTrue(body.get('visibility') == 'public')
- self.assertTrue('status' in body)
+ self.assertIn('status', body)
self.assertEqual('queued', body.get('status'))
# Now try uploading an image file
@@ -62,7 +62,7 @@
resp, body = self.client.store_image(image_id, image_file)
self.assertEqual(resp.status, 204)
resp, body = self.client.get_image_metadata(image_id)
- self.assertTrue('size' in body)
+ self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@@ -104,4 +104,4 @@
self.assertEqual(resp['status'], '200')
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
- self.assertTrue(image in image_list)
+ self.assertIn(image, image_list)
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 029f2d5..52b37c1 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -42,7 +42,7 @@
self.assertIsNotNone(container_list)
container_names = [c['name'] for c in container_list]
- self.assertTrue(self.container_name in container_names)
+ self.assertIn(self.container_name, container_names)
@attr(type='smoke')
def test_list_account_metadata(self):
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 5cb6341..8b9fc8c 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -37,7 +37,7 @@
container_name = rand_name(name='TestContainer')
resp, body = self.container_client.create_container(container_name)
self.containers.append(container_name)
- self.assertTrue(resp['status'] in ('202', '201'))
+ self.assertIn(resp['status'], ('202', '201'))
@attr(type='smoke')
def test_delete_container(self):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index ea8637c..5de4df0 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -67,9 +67,9 @@
(cont_client[1].base_url, str(cont[1]))}
resp, body = \
cont_client[0].put(str(cont[0]), body=None, headers=headers)
- self.assertTrue(resp['status'] in ('202', '201'),
- 'Error installing X-Container-Sync-To '
- 'for the container "%s"' % (cont[0]))
+ self.assertIn(resp['status'], ('202', '201'),
+ 'Error installing X-Container-Sync-To '
+ 'for the container "%s"' % (cont[0]))
# create object in container
object_name = rand_name(name='TestSyncObject')
data = object_name[::-1] # arbitrary_string()
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index a0b248c..d06d942 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
import time
from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
import tempest.test
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
index 4f22158..7897b70 100644
--- a/tempest/api/orchestration/stacks/test_instance_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -13,13 +13,13 @@
# under the License.
import json
-from tempest.common import log as logging
import testtools
from tempest.api.orchestration import base
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
import tempest.config
+from tempest.openstack.common import log as logging
from tempest.test import attr
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 15979ed..f1f1f7e 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -12,10 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.api.orchestration import base
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 086b981..13d0d48 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.services.volume.json.admin import volume_types_client
from tempest.services.volume.json import volumes_client
from tempest.test import attr
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 3c4b5d8..27caaad 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -64,14 +64,14 @@
vol_type_name,
extra_specs=extra_specs)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
+ self.assertIn('id', body)
+ self.assertIn('name', body)
resp, volume = self.volumes_client.create_volume(
size=1, display_name=vol_name,
volume_type=vol_type_name)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('display_name', volume)
self.assertEqual(volume['display_name'], vol_name,
"The created volume name is not equal "
"to the requested name")
@@ -113,8 +113,8 @@
name,
extra_specs=extra_specs)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
+ self.assertIn('id', body)
+ self.assertIn('name', body)
self.assertEqual(body['name'], name,
"The created volume_type name is not equal "
"to the requested name")
@@ -137,8 +137,8 @@
name,
extra_specs=extra_specs)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
+ self.assertIn('id', body)
+ self.assertIn('name', body)
self.assertEqual(body['name'], name,
"The created volume_type name is not equal "
"to the requested name")
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index a84f9e8..bb0047d 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -18,7 +18,7 @@
import time
from tempest import clients
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
import tempest.test
LOG = logging.getLogger(__name__)
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 56a3006..5861497 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -82,7 +82,7 @@
try:
resp, volume = self.client.get_volume(self.volume['id'])
self.assertEqual(200, resp.status)
- self.assertTrue('attachments' in volume)
+ self.assertIn('attachments', volume)
attachment = volume['attachments'][0]
self.assertEqual(mountpoint, attachment['device'])
self.assertEqual(self.server['id'], attachment['server_id'])
@@ -106,3 +106,4 @@
self.addCleanup(self.image_client.delete_image, image_id)
self.assertEqual(202, resp.status)
self.image_client.wait_for_image_status(image_id, 'active')
+ self.client.wait_for_volume_status(self.volume['id'], 'available')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index eda7153..ee285db 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -40,8 +40,8 @@
metadata=metadata,
**kwargs)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('display_name', volume)
self.assertEqual(volume['display_name'], v_name,
"The created volume name is not equal "
"to the requested name")
@@ -83,8 +83,8 @@
display_name=v_name,
metadata={})
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('display_name', volume)
self.client.wait_for_volume_status(volume['id'], 'available')
#GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 602209a..0328b44 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 0e1d6db..00e025d 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -22,7 +22,7 @@
from oslo.config import cfg
import tempest.cli.output_parser
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
import tempest.test
@@ -82,6 +82,11 @@
return self.cmd_with_auth(
'cinder', action, flags, params, admin, fail_ok)
+ def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
+ """Executes neutron command for the given action."""
+ return self.cmd_with_auth(
+ 'neutron', action, flags, params, admin, fail_ok)
+
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index 3ee3098..bfd7f9e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -17,11 +17,10 @@
"""Collection of utilities for parsing CLI clients output."""
-
-from tempest.common import log as logging
-
import re
+from tempest.openstack.common import log as logging
+
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 5dadbeb..e60e238 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -21,7 +21,7 @@
import testtools
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
CONF = cfg.CONF
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 802a206..1848827 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -18,7 +18,7 @@
import subprocess
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index fa77e8a..3d58451 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -19,7 +19,7 @@
import subprocess
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 3bc8b3e..4002081 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -19,7 +19,7 @@
import subprocess
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
new file mode 100644
index 0000000..4860090
--- /dev/null
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import subprocess
+
+from oslo.config import cfg
+
+import tempest.cli
+from tempest.openstack.common import log as logging
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class SimpleReadOnlyNeutronClientTest(tempest.cli.ClientTestBase):
+ """Basic, read-only tests for Neutron CLI client.
+
+ Checks return values and output of read-only commands.
+ These tests do not presume any content, nor do they create
+ their own. They only verify the structure of output if present.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ if (not CONF.service_available.neutron):
+ msg = "Skiping all Neutron cli tests because it is not available"
+ raise cls.skipException(msg)
+ super(SimpleReadOnlyNeutronClientTest, cls).setUpClass()
+
+ def test_neutron_fake_action(self):
+ self.assertRaises(subprocess.CalledProcessError,
+ self.neutron,
+ 'this-does-not-exist')
+
+ def test_neutron_net_list(self):
+ self.neutron('net-list')
+
+ def test_neutron_ext_list(self):
+ ext = self.parser.listing(self.neutron('ext-list'))
+ self.assertTableStruct(ext, ['alias', 'name'])
+
+ def test_neutron_dhcp_agent_list_hosting_net(self):
+ self.neutron('dhcp-agent-list-hosting-net', params="private")
+
+ def test_neutron_agent_list(self):
+ agents = self.parser.listing(self.neutron('agent-list'))
+ field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
+ self.assertTableStruct(agents, field_names)
+
+ def test_neutron_floatingip_list(self):
+ self.neutron('floatingip-list')
+
+ def test_neutron_net_external_list(self):
+ self.neutron('net-external-list')
+
+ def test_neutron_port_list(self):
+ self.neutron('port-list')
+
+ def test_neutron_quota_list(self):
+ self.neutron('quota-list')
+
+ def test_neutron_router_list(self):
+ self.neutron('router-list')
+
+ def test_neutron_security_group_list(self):
+ security_grp = self.parser.listing(self.neutron('security-group-list'))
+ self.assertTableStruct(security_grp, ['id', 'name', 'description'])
+
+ def test_neutron_security_group_rule_list(self):
+ self.neutron('security-group-rule-list')
+
+ def test_neutron_subnet_list(self):
+ self.neutron('subnet-list')
+
+ def test_neutron_help(self):
+ help_text = self.neutron('help')
+ lines = help_text.split('\n')
+ self.assertTrue(lines[0].startswith('usage: neutron'))
+
+ commands = []
+ cmds_start = lines.index('Commands for API v2.0:')
+ command_pattern = re.compile('^ {2}([a-z0-9\-\_]+)')
+ for line in lines[cmds_start:]:
+ match = command_pattern.match(line)
+ if match:
+ commands.append(match.group(1))
+ commands = set(commands)
+ wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
+ 'router-show', 'agent-update', 'help'))
+ self.assertFalse(wanted_commands - commands)
+
+ # Optional arguments:
+
+ def test_neutron_version(self):
+ self.neutron('', flags='--version')
+
+ def test_neutron_debug_net_list(self):
+ self.neutron('net-list', flags='--debug')
+
+ def test_neutron_quiet_net_list(self):
+ self.neutron('net-list', flags='--quiet')
diff --git a/tempest/clients.py b/tempest/clients.py
index 2154f8b..195cb89 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -15,9 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest import config
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services import botoclients
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
@@ -296,7 +296,8 @@
try:
self.servers_client = SERVERS_CLIENTS[interface](*client_args)
self.limits_client = LIMITS_CLIENTS[interface](*client_args)
- self.images_client = IMAGES_CLIENTS[interface](*client_args)
+ if self.config.service_available.glance:
+ self.images_client = IMAGES_CLIENTS[interface](*client_args)
self.keypairs_client = KEYPAIRS_CLIENTS[interface](*client_args)
self.quotas_client = QUOTAS_CLIENTS[interface](*client_args)
self.flavors_client = FLAVORS_CLIENTS[interface](*client_args)
@@ -341,8 +342,9 @@
self.network_client = NetworkClient(*client_args)
self.hosts_client = HostsClientJSON(*client_args)
self.account_client = AccountClient(*client_args)
- self.image_client = ImageClientJSON(*client_args)
- self.image_client_v2 = ImageClientV2JSON(*client_args)
+ if self.config.service_available.glance:
+ self.image_client = ImageClientJSON(*client_args)
+ self.image_client_v2 = ImageClientV2JSON(*client_args)
self.container_client = ContainerClient(*client_args)
self.object_client = ObjectClient(*client_args)
self.orchestration_client = OrchestrationClient(*client_args)
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index 4045430..831874d 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -34,9 +34,8 @@
import OpenSSL
-from tempest.common import log as logging
from tempest import exceptions as exc
-
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
USER_AGENT = 'tempest'
diff --git a/tempest/common/log.py b/tempest/common/log.py
deleted file mode 100644
index 2159bfe..0000000
--- a/tempest/common/log.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 NEC Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ConfigParser
-import inspect
-import logging
-import logging.config
-import os
-import re
-
-from oslo.config import cfg
-
-
-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
-_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-_loggers = {}
-
-
-def getLogger(name='unknown'):
- if len(_loggers) == 0:
- loaded = _load_log_config()
- getLogger.adapter = TestsAdapter if loaded else None
-
- if name not in _loggers:
- logger = logging.getLogger(name)
- if getLogger.adapter:
- _loggers[name] = getLogger.adapter(logger, name)
- else:
- _loggers[name] = logger
-
- return _loggers[name]
-
-
-def _load_log_config():
- conf_dir = os.environ.get('TEMPEST_LOG_CONFIG_DIR', None)
- conf_file = os.environ.get('TEMPEST_LOG_CONFIG', None)
- if not conf_dir or not conf_file:
- return False
-
- log_config = os.path.join(conf_dir, conf_file)
- try:
- logging.config.fileConfig(log_config)
- except ConfigParser.Error as exc:
- raise cfg.ConfigFileParseError(log_config, str(exc))
- return True
-
-
-class TestsAdapter(logging.LoggerAdapter):
-
- def __init__(self, logger, project_name):
- self.logger = logger
- self.project = project_name
- self.regexp = re.compile(r"test_\w+\.py")
-
- def __getattr__(self, key):
- return getattr(self.logger, key)
-
- def _get_test_name(self):
- frames = inspect.stack()
- for frame in frames:
- binary_name = frame[1]
- if self.regexp.search(binary_name) and 'self' in frame[0].f_locals:
- return frame[0].f_locals.get('self').id()
- elif frame[3] == '_run_cleanups':
- #NOTE(myamazaki): method calling addCleanup
- return frame[0].f_locals.get('self').case.id()
- elif frame[3] in ['setUpClass', 'tearDownClass']:
- #NOTE(myamazaki): setUpClass or tearDownClass
- return "%s.%s.%s" % (frame[0].f_locals['cls'].__module__,
- frame[0].f_locals['cls'].__name__,
- frame[3])
- return None
-
- def process(self, msg, kwargs):
- if 'extra' not in kwargs:
- kwargs['extra'] = {}
- extra = kwargs['extra']
-
- test_name = self._get_test_name()
- if test_name:
- extra.update({'testname': test_name})
- extra['extra'] = extra.copy()
-
- return msg, kwargs
-
-
-class TestsFormatter(logging.Formatter):
- def __init__(self, fmt=None, datefmt=None):
- super(TestsFormatter, self).__init__()
- self.default_format = _DEFAULT_LOG_FORMAT
- self.testname_format =\
- "%(asctime)s %(levelname)8s [%(testname)s] %(message)s"
- self.datefmt = _DEFAULT_LOG_DATE_FORMAT
-
- def format(self, record):
- extra = record.__dict__.get('extra', None)
- if extra and 'testname' in extra:
- self._fmt = self.testname_format
- else:
- self._fmt = self.default_format
- return logging.Formatter.format(self, record)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e94455d..09b87b2 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -24,8 +24,8 @@
import re
import time
-from tempest.common import log as logging
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import xml_to_json
# redrive rate limited calls at most twice
diff --git a/tempest/config.py b/tempest/config.py
index a918d0b..19170ae 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -23,10 +23,9 @@
from oslo.config import cfg
-from tempest.common import log as logging
from tempest.common.utils.misc import singleton
+from tempest.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
identity_group = cfg.OptGroup(name='identity',
title="Keystone Configuration Options")
@@ -603,7 +602,6 @@
def __init__(self):
"""Initialize a configuration from a conf directory and conf file."""
config_files = []
-
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
# Environment variables override defaults...
@@ -618,8 +616,6 @@
'TEMPEST_CONFIG' in os.environ):
path = failsafe_path
- LOG.info("Using tempest config file %s" % path)
-
if not os.path.exists(path):
msg = "Config file %s not found" % path
print(RuntimeError(msg), file=sys.stderr)
@@ -627,6 +623,9 @@
config_files.append(path)
cfg.CONF([], project='tempest', default_config_files=config_files)
+ logging.setup('tempest')
+ LOG = logging.getLogger('tempest')
+ LOG.info("Using tempest config file %s" % path)
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
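The config.py hunk above defers logging setup until after the configuration files are loaded, using the oslo-incubator log module now copied into tempest. A minimal sketch of that initialization order, based on the calls in the diff (the config file path is illustrative and must exist on the host):

from oslo.config import cfg

from tempest.openstack.common import log as logging

# Load configuration first, then configure logging from it, then log.
cfg.CONF([], project='tempest',
         default_config_files=['/etc/tempest/tempest.conf'])
logging.setup('tempest')
LOG = logging.getLogger('tempest')
LOG.info("Using tempest config file %s" % '/etc/tempest/tempest.conf')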
diff --git a/tempest/manager.py b/tempest/manager.py
index 187e2c6..54a0dec 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -15,41 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
import tempest.config
from tempest import exceptions
-# Tempest REST Fuzz testing client libs
-from tempest.services.compute.json import extensions_client
-from tempest.services.compute.json import flavors_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import hypervisor_client
-from tempest.services.compute.json import images_client
-from tempest.services.compute.json import keypairs_client
-from tempest.services.compute.json import limits_client
-from tempest.services.compute.json import quotas_client
-from tempest.services.compute.json import security_groups_client
-from tempest.services.compute.json import servers_client
-from tempest.services.compute.json import volumes_extensions_client
-from tempest.services.network.json import network_client
-from tempest.services.volume.json import snapshots_client
-from tempest.services.volume.json import volumes_client
-
-NetworkClient = network_client.NetworkClient
-ImagesClient = images_client.ImagesClientJSON
-FlavorsClient = flavors_client.FlavorsClientJSON
-ServersClient = servers_client.ServersClientJSON
-LimitsClient = limits_client.LimitsClientJSON
-ExtensionsClient = extensions_client.ExtensionsClientJSON
-FloatingIPsClient = floating_ips_client.FloatingIPsClientJSON
-SecurityGroupsClient = security_groups_client.SecurityGroupsClientJSON
-KeyPairsClient = keypairs_client.KeyPairsClientJSON
-VolumesExtensionsClient = volumes_extensions_client.VolumesExtensionsClientJSON
-VolumesClient = volumes_client.VolumesClientJSON
-SnapshotsClient = snapshots_client.SnapshotsClientJSON
-QuotasClient = quotas_client.QuotasClientJSON
-HypervisorClient = hypervisor_client.HypervisorClientJSON
-
-LOG = logging.getLogger(__name__)
class Manager(object):
@@ -73,97 +40,3 @@
"tenant_name: %(t)s" %
{'u': username, 'p': password, 't': tenant_name})
raise exceptions.InvalidConfiguration(msg)
-
-
-class FuzzClientManager(Manager):
-
- """
- Manager class that indicates the client provided by the manager
- is a fuzz-testing client that Tempest contains. These fuzz-testing
- clients are used to be able to throw random or invalid data at
- an endpoint and check for appropriate error messages returned
- from the endpoint.
- """
- pass
-
-
-class ComputeFuzzClientManager(FuzzClientManager):
-
- """
- Manager that uses the Tempest REST client that can send
- random or invalid data at the OpenStack Compute API
- """
-
- def __init__(self, username=None, password=None, tenant_name=None):
- """
- We allow overriding of the credentials used within the various
- client classes managed by the Manager object. Left as None, the
- standard username/password/tenant_name is used.
-
- :param username: Override of the username
- :param password: Override of the password
- :param tenant_name: Override of the tenant name
- """
- super(ComputeFuzzClientManager, self).__init__()
-
- # If no creds are provided, we fall back on the defaults
- # in the config file for the Compute API.
- username = username or self.config.identity.username
- password = password or self.config.identity.password
- tenant_name = tenant_name or self.config.identity.tenant_name
-
- self._validate_credentials(username, password, tenant_name)
-
- auth_url = self.config.identity.uri
-
- # Ensure /tokens is in the URL for Keystone...
- if 'tokens' not in auth_url:
- auth_url = auth_url.rstrip('/') + '/tokens'
-
- client_args = (self.config, username, password, auth_url,
- tenant_name)
-
- self.servers_client = ServersClient(*client_args)
- self.flavors_client = FlavorsClient(*client_args)
- self.images_client = ImagesClient(*client_args)
- self.limits_client = LimitsClient(*client_args)
- self.extensions_client = ExtensionsClient(*client_args)
- self.keypairs_client = KeyPairsClient(*client_args)
- self.security_groups_client = SecurityGroupsClient(*client_args)
- self.floating_ips_client = FloatingIPsClient(*client_args)
- self.volumes_extensions_client = VolumesExtensionsClient(*client_args)
- self.volumes_client = VolumesClient(*client_args)
- self.snapshots_client = SnapshotsClient(*client_args)
- self.quotas_client = QuotasClient(*client_args)
- self.network_client = NetworkClient(*client_args)
- self.hypervisor_client = HypervisorClient(*client_args)
-
-
-class ComputeFuzzClientAltManager(Manager):
-
- """
- Manager object that uses the alt_XXX credentials for its
- managed client objects
- """
-
- def __init__(self):
- conf = tempest.config.TempestConfig()
- super(ComputeFuzzClientAltManager, self).__init__(
- conf.identity.alt_username,
- conf.identity.alt_password,
- conf.identity.alt_tenant_name)
-
-
-class ComputeFuzzClientAdminManager(Manager):
-
- """
- Manager object that uses the alt_XXX credentials for its
- managed client objects
- """
-
- def __init__(self):
- conf = tempest.config.TempestConfig()
- super(ComputeFuzzClientAdminManager, self).__init__(
- conf.compute_admin.username,
- conf.compute_admin.password,
- conf.compute_admin.tenant_name)
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/openstack/common/__init__.py
diff --git a/tempest/openstack/common/excutils.py b/tempest/openstack/common/excutils.py
new file mode 100644
index 0000000..81aad14
--- /dev/null
+++ b/tempest/openstack/common/excutils.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception related utilities.
+"""
+
+import logging
+import sys
+import time
+import traceback
+
+from tempest.openstack.common.gettextutils import _ # noqa
+
+
+class save_and_reraise_exception(object):
+ """Save current exception, run some code and then re-raise.
+
+ In some cases the exception context can be cleared, resulting in None
+ being attempted to be re-raised after an exception handler is run. This
+ can happen when eventlet switches greenthreads or when running an
+ exception handler, code raises and catches an exception. In both
+ cases the exception context will be cleared.
+
+ To work around this, we save the exception state, run handler code, and
+ then re-raise the original exception. If another exception occurs, the
+ saved exception is logged and the new exception is re-raised.
+
+ In some cases the caller may not want to re-raise the exception, and
+ for those circumstances this context provides a reraise flag that
+ can be used to suppress the exception. For example:
+
+ except Exception:
+ with save_and_reraise_exception() as ctxt:
+ decide_if_need_reraise()
+ if not should_be_reraised:
+ ctxt.reraise = False
+ """
+ def __init__(self):
+ self.reraise = True
+
+ def __enter__(self):
+ self.type_, self.value, self.tb, = sys.exc_info()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is not None:
+ logging.error(_('Original exception being dropped: %s'),
+ traceback.format_exception(self.type_,
+ self.value,
+ self.tb))
+ return False
+ if self.reraise:
+ raise self.type_, self.value, self.tb
+
+
+def forever_retry_uncaught_exceptions(infunc):
+ def inner_func(*args, **kwargs):
+ last_log_time = 0
+ last_exc_message = None
+ exc_count = 0
+ while True:
+ try:
+ return infunc(*args, **kwargs)
+ except Exception as exc:
+ if exc.message == last_exc_message:
+ exc_count += 1
+ else:
+ exc_count = 1
+ # Do not log any more frequently than once a minute unless
+ # the exception message changes
+ cur_time = int(time.time())
+ if (cur_time - last_log_time > 60 or
+ exc.message != last_exc_message):
+ logging.exception(
+ _('Unexpected exception occurred %d time(s)... '
+ 'retrying.') % exc_count)
+ last_log_time = cur_time
+ last_exc_message = exc.message
+ exc_count = 0
+ # This should be a very rare event. In case it isn't, do
+ # a sleep.
+ time.sleep(1)
+ return inner_func
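
For reference, a minimal usage sketch of save_and_reraise_exception from the
module above; rollback_quota() is illustrative and not part of tempest::

    from tempest.openstack.common import excutils

    def rollback_quota():
        # Placeholder for caller-specific cleanup; not a real tempest helper.
        pass

    try:
        raise RuntimeError('quota update failed')
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            rollback_quota()
            # Setting ctxt.reraise = False here would suppress the re-raise.
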
diff --git a/tempest/openstack/common/fileutils.py b/tempest/openstack/common/fileutils.py
new file mode 100644
index 0000000..d2e3d3e
--- /dev/null
+++ b/tempest/openstack/common/fileutils.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import errno
+import os
+
+from tempest.openstack.common import excutils
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+
+
+def ensure_tree(path):
+ """Create a directory (and any ancestor directories required)
+
+ :param path: Directory to create
+ """
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ if not os.path.isdir(path):
+ raise
+ else:
+ raise
+
+
+def read_cached_file(filename, force_reload=False):
+ """Read from a file if it has been modified.
+
+ :param force_reload: Whether to reload the file.
+    :returns: A tuple of (reloaded, data): a boolean specifying whether
+              the cached data was reloaded, followed by the file contents.
+ """
+ global _FILE_CACHE
+
+ if force_reload and filename in _FILE_CACHE:
+ del _FILE_CACHE[filename]
+
+ reloaded = False
+ mtime = os.path.getmtime(filename)
+ cache_info = _FILE_CACHE.setdefault(filename, {})
+
+ if not cache_info or mtime > cache_info.get('mtime', 0):
+ LOG.debug(_("Reloading cached file %s") % filename)
+ with open(filename) as fap:
+ cache_info['data'] = fap.read()
+ cache_info['mtime'] = mtime
+ reloaded = True
+ return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path):
+ """Delete a file, but ignore file not found error.
+
+ :param path: File to delete
+ """
+
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path):
+ """Protect code that wants to operate on PATH atomically.
+ Any exception will cause PATH to be removed.
+
+ :param path: File to work with
+ """
+ try:
+ yield
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ delete_if_exists(path)
+
+
+def file_open(*args, **kwargs):
+ """Open file
+
+ see built-in file() documentation for more details
+
+ Note: The reason this is kept in a separate module is to easily
+ be able to provide a stub module that doesn't alter system
+ state at all (for unit tests)
+ """
+ return file(*args, **kwargs)
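
For reference, a minimal sketch of the fileutils helpers added above; the
/tmp/tempest-demo path is illustrative::

    from tempest.openstack.common import fileutils

    fileutils.ensure_tree('/tmp/tempest-demo')
    path = '/tmp/tempest-demo/scratch.txt'
    with fileutils.remove_path_on_error(path):
        with open(path, 'w') as f:
            f.write('partial data')   # on any exception, path is removed
    fileutils.delete_if_exists(path)  # no error if the file is already gone
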
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
new file mode 100644
index 0000000..8594937
--- /dev/null
+++ b/tempest/openstack/common/gettextutils.py
@@ -0,0 +1,259 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+ from tempest.openstack.common.gettextutils import _
+"""
+
+import copy
+import gettext
+import logging.handlers
+import os
+import re
+import UserString
+
+import six
+
+_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
+_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+
+
+def _(msg):
+ return _t.ugettext(msg)
+
+
+def install(domain):
+ """Install a _() function using the given translation domain.
+
+ Given a translation domain, install a _() function using gettext's
+ install() function.
+
+ The main difference from gettext.install() is that we allow
+ overriding the default localedir (e.g. /usr/share/locale) using
+ a translation-domain-specific environment variable (e.g.
+ NOVA_LOCALEDIR).
+ """
+ gettext.install(domain,
+ localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+ unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+ """Assemble and return a lazy gettext function for a given domain.
+
+ Factory method for a project/module to get a lazy gettext function
+ for its own translation domain (i.e. nova, glance, cinder, etc.)
+ """
+
+ def _lazy_gettext(msg):
+ """Create and return a Message object.
+
+ Message encapsulates a string so that we can translate it later when
+ needed.
+ """
+ return Message(msg, domain)
+
+ return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+ """Class used to encapsulate translatable messages."""
+ def __init__(self, msg, domain):
+ # _msg is the gettext msgid and should never change
+ self._msg = msg
+ self._left_extra_msg = ''
+ self._right_extra_msg = ''
+ self.params = None
+ self.locale = None
+ self.domain = domain
+
+ @property
+ def data(self):
+ # NOTE(mrodden): this should always resolve to a unicode string
+ # that best represents the state of the message currently
+
+ localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+ if self.locale:
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ languages=[self.locale],
+ fallback=True)
+ else:
+ # use system locale for translations
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ fallback=True)
+
+ full_msg = (self._left_extra_msg +
+ lang.ugettext(self._msg) +
+ self._right_extra_msg)
+
+ if self.params is not None:
+ full_msg = full_msg % self.params
+
+ return six.text_type(full_msg)
+
+ def _save_dictionary_parameter(self, dict_param):
+ full_msg = self.data
+ # look for %(blah) fields in string;
+ # ignore %% and deal with the
+ # case where % is first character on the line
+ keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+
+ # if we don't find any %(blah) blocks but have a %s
+ if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
+ # apparently the full dictionary is the parameter
+ params = copy.deepcopy(dict_param)
+ else:
+ params = {}
+ for key in keys:
+ try:
+ params[key] = copy.deepcopy(dict_param[key])
+ except TypeError:
+ # cast uncopyable thing to unicode string
+ params[key] = unicode(dict_param[key])
+
+ return params
+
+ def _save_parameters(self, other):
+ # we check for None later to see if
+ # we actually have parameters to inject,
+ # so encapsulate if our parameter is actually None
+ if other is None:
+ self.params = (other, )
+ elif isinstance(other, dict):
+ self.params = self._save_dictionary_parameter(other)
+ else:
+ # fallback to casting to unicode,
+ # this will handle the problematic python code-like
+ # objects that cannot be deep-copied
+ try:
+ self.params = copy.deepcopy(other)
+ except TypeError:
+ self.params = unicode(other)
+
+ return self
+
+ # overrides to be more string-like
+ def __unicode__(self):
+ return self.data
+
+ def __str__(self):
+ return self.data.encode('utf-8')
+
+ def __getstate__(self):
+ to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+ 'domain', 'params', 'locale']
+ new_dict = self.__dict__.fromkeys(to_copy)
+ for attr in to_copy:
+ new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+ return new_dict
+
+ def __setstate__(self, state):
+ for (k, v) in state.items():
+ setattr(self, k, v)
+
+ # operator overloads
+ def __add__(self, other):
+ copied = copy.deepcopy(self)
+ copied._right_extra_msg += other.__str__()
+ return copied
+
+ def __radd__(self, other):
+ copied = copy.deepcopy(self)
+ copied._left_extra_msg += other.__str__()
+ return copied
+
+ def __mod__(self, other):
+ # do a format string to catch and raise
+ # any possible KeyErrors from missing parameters
+ self.data % other
+ copied = copy.deepcopy(self)
+ return copied._save_parameters(other)
+
+ def __mul__(self, other):
+ return self.data * other
+
+ def __rmul__(self, other):
+ return other * self.data
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __getslice__(self, start, end):
+ return self.data.__getslice__(start, end)
+
+ def __getattribute__(self, name):
+ # NOTE(mrodden): handle lossy operations that we can't deal with yet
+ # These override the UserString implementation, since UserString
+ # uses our __class__ attribute to try and build a new message
+ # after running the inner data string through the operation.
+ # At that point, we have lost the gettext message id and can just
+ # safely resolve to a string instead.
+ ops = ['capitalize', 'center', 'decode', 'encode',
+ 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+ 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+ if name in ops:
+ return getattr(self.data, name)
+ else:
+ return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+ """Handler that can have a locale associated to translate Messages.
+
+ A quick example of how to utilize the Message class above.
+ LocaleHandler takes a locale and a target logging.Handler object
+ to forward LogRecord objects to after translating the internal Message.
+ """
+
+ def __init__(self, locale, target):
+ """Initialize a LocaleHandler
+
+ :param locale: locale to use for translating messages
+ :param target: logging.Handler object to forward
+ LogRecord objects to after translation
+ """
+ logging.Handler.__init__(self)
+ self.locale = locale
+ self.target = target
+
+ def emit(self, record):
+ if isinstance(record.msg, Message):
+ # set the locale and resolve to a string
+ record.msg.locale = self.locale
+
+ self.target.emit(record)
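
For reference, a minimal sketch of the lazy-translation path added above; the
message text is illustrative::

    from tempest.openstack.common import gettextutils

    _ = gettextutils.get_lazy_gettext('tempest')
    msg = _('Server %(id)s deleted') % {'id': 'abc123'}
    # msg is a Message; translation and interpolation are deferred until it
    # is rendered, e.g. by LocaleHandler or an explicit unicode() call.
    print(unicode(msg))
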
diff --git a/tempest/openstack/common/importutils.py b/tempest/openstack/common/importutils.py
new file mode 100644
index 0000000..7a303f9
--- /dev/null
+++ b/tempest/openstack/common/importutils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+ """Returns a class from a string including module and class."""
+ mod_str, _sep, class_str = import_str.rpartition('.')
+ try:
+ __import__(mod_str)
+ return getattr(sys.modules[mod_str], class_str)
+ except (ValueError, AttributeError):
+ raise ImportError('Class %s cannot be found (%s)' %
+ (class_str,
+ traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+ """Import a class and return an instance of it."""
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+ """Tries to import object from default namespace.
+
+    Imports a class and returns an instance of it, first by trying
+    to find the class in a default namespace, then falling back to
+    a full path if not found in the default namespace.
+ """
+ import_value = "%s.%s" % (name_space, import_str)
+ try:
+ return import_class(import_value)(*args, **kwargs)
+ except ImportError:
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+ """Import a module."""
+ __import__(import_str)
+ return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+ """Try to import a module and if it fails return default."""
+ try:
+ return import_module(import_str)
+ except ImportError:
+ return default
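
For reference, a minimal sketch of the importutils helpers using only
standard-library names::

    from tempest.openstack.common import importutils

    handler_cls = importutils.import_class('logging.StreamHandler')
    handler = importutils.import_object('logging.StreamHandler')
    json_mod = importutils.try_import('simplejson')  # None if unavailable
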
diff --git a/tempest/openstack/common/jsonutils.py b/tempest/openstack/common/jsonutils.py
new file mode 100644
index 0000000..bd43e59
--- /dev/null
+++ b/tempest/openstack/common/jsonutils.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+JSON related utilities.
+
+This module provides a few things:
+
+ 1) A handy function for getting an object down to something that can be
+ JSON serialized. See to_primitive().
+
+ 2) Wrappers around loads() and dumps(). The dumps() wrapper will
+ automatically use to_primitive() for you if needed.
+
+ 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
+ is available.
+'''
+
+
+import datetime
+import functools
+import inspect
+import itertools
+import json
+import types
+import xmlrpclib
+
+import netaddr
+import six
+
+from tempest.openstack.common import timeutils
+
+
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+ inspect.isfunction, inspect.isgeneratorfunction,
+ inspect.isgenerator, inspect.istraceback, inspect.isframe,
+ inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+ inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
+
+
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+ level=0, max_depth=3):
+ """Convert a complex object into primitives.
+
+ Handy for JSON serialization. We can optionally handle instances,
+ but since this is a recursive function, we could have cyclical
+ data structures.
+
+ To handle cyclical data structures we could track the actual objects
+ visited in a set, but not all objects are hashable. Instead we just
+ track the depth of the object inspections and don't go too deep.
+
+ Therefore, convert_instances=True is lossy ... be aware.
+
+ """
+ # handle obvious types first - order of basic types determined by running
+ # full tests on nova project, resulting in the following counts:
+ # 572754 <type 'NoneType'>
+ # 460353 <type 'int'>
+ # 379632 <type 'unicode'>
+ # 274610 <type 'str'>
+ # 199918 <type 'dict'>
+ # 114200 <type 'datetime.datetime'>
+ # 51817 <type 'bool'>
+ # 26164 <type 'list'>
+ # 6491 <type 'float'>
+ # 283 <type 'tuple'>
+ # 19 <type 'long'>
+ if isinstance(value, _simple_types):
+ return value
+
+ if isinstance(value, datetime.datetime):
+ if convert_datetime:
+ return timeutils.strtime(value)
+ else:
+ return value
+
+ # value of itertools.count doesn't get caught by nasty_type_tests
+ # and results in infinite loop when list(value) is called.
+ if type(value) == itertools.count:
+ return six.text_type(value)
+
+ # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+ # tests that raise an exception in a mocked method that
+ # has a @wrap_exception with a notifier will fail. If
+ # we up the dependency to 0.5.4 (when it is released) we
+ # can remove this workaround.
+ if getattr(value, '__module__', None) == 'mox':
+ return 'mock'
+
+ if level > max_depth:
+ return '?'
+
+ # The try block may not be necessary after the class check above,
+ # but just in case ...
+ try:
+ recursive = functools.partial(to_primitive,
+ convert_instances=convert_instances,
+ convert_datetime=convert_datetime,
+ level=level,
+ max_depth=max_depth)
+ if isinstance(value, dict):
+ return dict((k, recursive(v)) for k, v in value.iteritems())
+ elif isinstance(value, (list, tuple)):
+ return [recursive(lv) for lv in value]
+
+ # It's not clear why xmlrpclib created their own DateTime type, but
+ # for our purposes, make it a datetime type which is explicitly
+ # handled
+ if isinstance(value, xmlrpclib.DateTime):
+ value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+ if convert_datetime and isinstance(value, datetime.datetime):
+ return timeutils.strtime(value)
+ elif hasattr(value, 'iteritems'):
+ return recursive(dict(value.iteritems()), level=level + 1)
+ elif hasattr(value, '__iter__'):
+ return recursive(list(value))
+ elif convert_instances and hasattr(value, '__dict__'):
+ # Likely an instance of something. Watch for cycles.
+ # Ignore class member vars.
+ return recursive(value.__dict__, level=level + 1)
+ elif isinstance(value, netaddr.IPAddress):
+ return six.text_type(value)
+ else:
+ if any(test(value) for test in _nasty_type_tests):
+ return six.text_type(value)
+ return value
+ except TypeError:
+ # Class objects are tricky since they may define something like
+ # __iter__ defined but it isn't callable as list().
+ return six.text_type(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+ return json.dumps(value, default=default, **kwargs)
+
+
+def loads(s):
+ return json.loads(s)
+
+
+def load(s):
+ return json.load(s)
+
+
+try:
+ import anyjson
+except ImportError:
+ pass
+else:
+ anyjson._modules.append((__name__, 'dumps', TypeError,
+ 'loads', ValueError, 'load'))
+ anyjson.force_implementation(__name__)
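
For reference, a minimal sketch of the jsonutils wrappers; the payload is
illustrative::

    import datetime

    from tempest.openstack.common import jsonutils

    payload = {'name': 'vm-1',
               'created': datetime.datetime(2013, 7, 1, 12, 0, 0)}
    # dumps() routes non-JSON types (the datetime here) through to_primitive().
    serialized = jsonutils.dumps(payload)
    assert jsonutils.loads(serialized)['name'] == 'vm-1'
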
diff --git a/tempest/openstack/common/local.py b/tempest/openstack/common/local.py
new file mode 100644
index 0000000..f1bfc82
--- /dev/null
+++ b/tempest/openstack/common/local.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Greenthread local storage of variables using weak references"""
+
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+ def __getattribute__(self, attr):
+ rval = corolocal.local.__getattribute__(self, attr)
+ if rval:
+ # NOTE(mikal): this bit is confusing. What is stored is a weak
+ # reference, not the value itself. We therefore need to lookup
+ # the weak reference and return the inner value here.
+ rval = rval()
+ return rval
+
+ def __setattr__(self, attr, value):
+ value = weakref.ref(value)
+ return corolocal.local.__setattr__(self, attr, value)
+
+
+# NOTE(mikal): the name "store" should be deprecated in the future
+store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local
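
For reference, a minimal sketch of the weak thread-local store; the Context
class is illustrative::

    from tempest.openstack.common import local

    class Context(object):
        pass

    ctx = Context()
    local.weak_store.context = ctx        # held via a weak reference
    assert local.weak_store.context is ctx
    del ctx
    # With the last strong reference gone, the store now resolves to None
    # instead of keeping the object alive.
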
diff --git a/tempest/openstack/common/lockutils.py b/tempest/openstack/common/lockutils.py
new file mode 100644
index 0000000..3ff1a7a
--- /dev/null
+++ b/tempest/openstack/common/lockutils.py
@@ -0,0 +1,276 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import errno
+import functools
+import os
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from tempest.openstack.common import fileutils
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import local
+from tempest.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+ cfg.BoolOpt('disable_process_locking', default=False,
+ help='Whether to disable inter-process locks'),
+ cfg.StrOpt('lock_path',
+ help=('Directory to use for lock files.'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+ cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+ """Lock implementation which allows multiple locks, working around
+ issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+ not require any cleanup. Since the lock is always held on a file
+ descriptor rather than outside of the process, the lock gets dropped
+ automatically if the process crashes, even if __exit__ is not executed.
+
+ There are no guarantees regarding usage by multiple green threads in a
+ single process here. This lock works only between processes. Exclusive
+ access between local threads should be achieved using the semaphores
+ in the @synchronized decorator.
+
+ Note these locks are released when the descriptor is closed, so it's not
+ safe to close the file descriptor while another green thread holds the
+ lock. Just opening and closing the lock file can break synchronisation,
+ so lock files must be accessed only using this abstraction.
+ """
+
+ def __init__(self, name):
+ self.lockfile = None
+ self.fname = name
+
+ def __enter__(self):
+ self.lockfile = open(self.fname, 'w')
+
+ while True:
+ try:
+ # Using non-blocking locks since green threads are not
+ # patched to deal with blocking locking calls.
+ # Also upon reading the MSDN docs for locking(), it seems
+ # to have a laughable 10 attempts "blocking" mechanism.
+ self.trylock()
+ return self
+ except IOError as e:
+ if e.errno in (errno.EACCES, errno.EAGAIN):
+ # external locks synchronise things like iptables
+ # updates - give it some time to prevent busy spinning
+ time.sleep(0.01)
+ else:
+ raise
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ try:
+ self.unlock()
+ self.lockfile.close()
+ except IOError:
+ LOG.exception(_("Could not release the acquired lock `%s`"),
+ self.fname)
+
+ def trylock(self):
+ raise NotImplementedError()
+
+ def unlock(self):
+ raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+ def trylock(self):
+ msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+ def unlock(self):
+ msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+ def trylock(self):
+ fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+ def unlock(self):
+ fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+ import msvcrt
+ InterProcessLock = _WindowsLock
+else:
+ import fcntl
+ InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+ """Context based lock
+
+ This function yields a `semaphore.Semaphore` instance unless external is
+ True, in which case, it'll yield an InterProcessLock instance.
+
+ :param lock_file_prefix: The lock_file_prefix argument is used to provide
+ lock files on disk with a meaningful prefix.
+
+ :param external: The external keyword argument denotes whether this lock
+ should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+ external=True), only one of them will execute at a time.
+
+ :param lock_path: The lock_path keyword argument is used to specify a
+ special location for external lock files to live. If nothing is set, then
+ CONF.lock_path is used as a default.
+ """
+ # NOTE(soren): If we ever go natively threaded, this will be racy.
+ # See http://stackoverflow.com/questions/5390569/dyn
+ # amically-allocating-and-destroying-mutexes
+ sem = _semaphores.get(name, semaphore.Semaphore())
+ if name not in _semaphores:
+ # this check is not racy - we're already holding ref locally
+ # so GC won't remove the item and there was no IO switch
+ # (only valid in greenthreads)
+ _semaphores[name] = sem
+
+ with sem:
+ LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+
+ # NOTE(mikal): I know this looks odd
+ if not hasattr(local.strong_store, 'locks_held'):
+ local.strong_store.locks_held = []
+ local.strong_store.locks_held.append(name)
+
+ try:
+ if external and not CONF.disable_process_locking:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+ {'lock': name})
+
+ # We need a copy of lock_path because it is non-local
+ local_lock_path = lock_path or CONF.lock_path
+ if not local_lock_path:
+ raise cfg.RequiredOptError('lock_path')
+
+ if not os.path.exists(local_lock_path):
+ fileutils.ensure_tree(local_lock_path)
+ LOG.info(_('Created lock path: %s'), local_lock_path)
+
+ def add_prefix(name, prefix):
+ if not prefix:
+ return name
+ sep = '' if prefix.endswith('-') else '-'
+ return '%s%s%s' % (prefix, sep, name)
+
+ # NOTE(mikal): the lock name cannot contain directory
+ # separators
+ lock_file_name = add_prefix(name.replace(os.sep, '_'),
+ lock_file_prefix)
+
+ lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+ try:
+ lock = InterProcessLock(lock_file_path)
+ with lock as lock:
+ LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ yield lock
+ finally:
+ LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ else:
+ yield sem
+
+ finally:
+ local.strong_store.locks_held.remove(name)
+
+
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
+ """Synchronization decorator.
+
+ Decorating a method like so::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ ensures that only one thread will execute the foo method at a time.
+
+ Different methods can share the same lock::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ @synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ This way only one of either foo or bar can be executing at a time.
+ """
+
+ def wrap(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ with lock(name, lock_file_prefix, external, lock_path):
+ LOG.debug(_('Got semaphore / lock "%(function)s"'),
+ {'function': f.__name__})
+ return f(*args, **kwargs)
+
+ LOG.debug(_('Semaphore / lock released "%(function)s"'),
+ {'function': f.__name__})
+ return inner
+ return wrap
+
+
+def synchronized_with_prefix(lock_file_prefix):
+ """Partial object generator for the synchronization decorator.
+
+ Redefine @synchronized in each project like so::
+
+ (in nova/utils.py)
+ from nova.openstack.common import lockutils
+
+ synchronized = lockutils.synchronized_with_prefix('nova-')
+
+
+ (in nova/foo.py)
+ from nova import utils
+
+ @utils.synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ The lock_file_prefix argument is used to provide lock files on disk with a
+ meaningful prefix.
+ """
+
+ return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
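
For reference, a minimal sketch of the synchronized decorator; the lock name
and lock_path are illustrative::

    from tempest.openstack.common import lockutils

    synchronized = lockutils.synchronized_with_prefix('tempest-')

    @synchronized('image-cache', external=True,
                  lock_path='/tmp/tempest-locks')
    def refresh_image_cache():
        # Only one process at a time runs this body; the file lock lives at
        # /tmp/tempest-locks/tempest-image-cache.
        pass
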
diff --git a/tempest/openstack/common/log.py b/tempest/openstack/common/log.py
new file mode 100644
index 0000000..4133c30
--- /dev/null
+++ b/tempest/openstack/common/log.py
@@ -0,0 +1,559 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Openstack logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used. Additionally, an instance uuid
+may be passed as part of the log message, which is intended to make it easier
+for admins to find messages related to a specific instance.
+
+It also allows setting of formatting information through conf.
+
+"""
+
+import inspect
+import itertools
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+import traceback
+
+from oslo.config import cfg
+from six import moves
+
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import importutils
+from tempest.openstack.common import jsonutils
+from tempest.openstack.common import local
+
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output (set logging level to '
+ 'DEBUG instead of default WARNING level).'),
+ cfg.BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output (set logging level to '
+ 'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+ cfg.StrOpt('log-config',
+ metavar='PATH',
+ help='If this option is specified, the logging configuration '
+ 'file specified is used and overrides any other logging '
+ 'options specified. Please see the Python logging module '
+ 'documentation for details on logging configuration '
+ 'files.'),
+ cfg.StrOpt('log-format',
+ default=None,
+ metavar='FORMAT',
+ help='DEPRECATED. '
+ 'A logging.Formatter log message format string which may '
+ 'use any of the available logging.LogRecord attributes. '
+ 'This option is deprecated. Please use '
+ 'logging_context_format_string and '
+ 'logging_default_format_string instead.'),
+ cfg.StrOpt('log-date-format',
+ default=_DEFAULT_LOG_DATE_FORMAT,
+ metavar='DATE_FORMAT',
+ help='Format string for %%(asctime)s in log records. '
+ 'Default: %(default)s'),
+ cfg.StrOpt('log-file',
+ metavar='PATH',
+ deprecated_name='logfile',
+ help='(Optional) Name of log file to output to. '
+ 'If no default is set, logging will go to stdout.'),
+ cfg.StrOpt('log-dir',
+ deprecated_name='logdir',
+ help='(Optional) The base directory used for relative '
+ '--log-file paths'),
+ cfg.BoolOpt('use-syslog',
+ default=False,
+ help='Use syslog for logging.'),
+ cfg.StrOpt('syslog-log-facility',
+ default='LOG_USER',
+ help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+ cfg.BoolOpt('use_stderr',
+ default=True,
+ help='Log output to standard error')
+]
+
+log_opts = [
+ cfg.StrOpt('logging_context_format_string',
+ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+ '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+ '%(instance)s%(message)s',
+ help='format string to use for log messages with context'),
+ cfg.StrOpt('logging_default_format_string',
+ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+ '%(name)s [-] %(instance)s%(message)s',
+ help='format string to use for log messages without context'),
+ cfg.StrOpt('logging_debug_format_suffix',
+ default='%(funcName)s %(pathname)s:%(lineno)d',
+ help='data to append to log format when level is DEBUG'),
+ cfg.StrOpt('logging_exception_prefix',
+ default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
+ '%(instance)s',
+ help='prefix each line of exception output with this format'),
+ cfg.ListOpt('default_log_levels',
+ default=[
+ 'amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'boto=WARN',
+ 'suds=INFO',
+ 'keystone=INFO',
+ 'eventlet.wsgi.server=WARN'
+ ],
+ help='list of logger=LEVEL pairs'),
+ cfg.BoolOpt('publish_errors',
+ default=False,
+ help='publish error events'),
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='make deprecations fatal'),
+
+ # NOTE(mikal): there are two options here because sometimes we are handed
+ # a full instance (and could include more information), and other times we
+ # are just handed a UUID for the instance.
+ cfg.StrOpt('instance_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance is passed with the log message, format '
+ 'it like this'),
+ cfg.StrOpt('instance_uuid_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance UUID is passed with the log message, '
+ 'format it like this'),
+]
+
+CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
+CONF.register_opts(generic_log_opts)
+CONF.register_opts(log_opts)
+
+# our new audit level
+# NOTE(jkoelker) Since we synthesized an audit level, make the logging
+# module aware of it so it acts like other levels.
+logging.AUDIT = logging.INFO + 1
+logging.addLevelName(logging.AUDIT, 'AUDIT')
+
+
+try:
+ NullHandler = logging.NullHandler
+except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
+ class NullHandler(logging.Handler):
+ def handle(self, record):
+ pass
+
+ def emit(self, record):
+ pass
+
+ def createLock(self):
+ self.lock = None
+
+
+def _dictify_context(context):
+ if context is None:
+ return None
+ if not isinstance(context, dict) and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def _get_binary_name():
+ return os.path.basename(inspect.stack()[-1][1])
+
+
+def _get_log_file_path(binary=None):
+ logfile = CONF.log_file
+ logdir = CONF.log_dir
+
+ if logfile and not logdir:
+ return logfile
+
+ if logfile and logdir:
+ return os.path.join(logdir, logfile)
+
+ if logdir:
+ binary = binary or _get_binary_name()
+ return '%s.log' % (os.path.join(logdir, binary),)
+
+
+class BaseLoggerAdapter(logging.LoggerAdapter):
+
+ def audit(self, msg, *args, **kwargs):
+ self.log(logging.AUDIT, msg, *args, **kwargs)
+
+
+class LazyAdapter(BaseLoggerAdapter):
+ def __init__(self, name='unknown', version='unknown'):
+ self._logger = None
+ self.extra = {}
+ self.name = name
+ self.version = version
+
+ @property
+ def logger(self):
+ if not self._logger:
+ self._logger = getLogger(self.name, self.version)
+ return self._logger
+
+
+class ContextAdapter(BaseLoggerAdapter):
+ warn = logging.LoggerAdapter.warning
+
+ def __init__(self, logger, project_name, version_string):
+ self.logger = logger
+ self.project = project_name
+ self.version = version_string
+
+ @property
+ def handlers(self):
+ return self.logger.handlers
+
+ def deprecated(self, msg, *args, **kwargs):
+ stdmsg = _("Deprecated: %s") % msg
+ if CONF.fatal_deprecations:
+ self.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+ else:
+ self.warn(stdmsg, *args, **kwargs)
+
+ def process(self, msg, kwargs):
+ if 'extra' not in kwargs:
+ kwargs['extra'] = {}
+ extra = kwargs['extra']
+
+ context = kwargs.pop('context', None)
+ if not context:
+ context = getattr(local.store, 'context', None)
+ if context:
+ extra.update(_dictify_context(context))
+
+ instance = kwargs.pop('instance', None)
+ instance_extra = ''
+ if instance:
+ instance_extra = CONF.instance_format % instance
+ else:
+ instance_uuid = kwargs.pop('instance_uuid', None)
+ if instance_uuid:
+ instance_extra = (CONF.instance_uuid_format
+ % {'uuid': instance_uuid})
+ extra.update({'instance': instance_extra})
+
+ extra.update({"project": self.project})
+ extra.update({"version": self.version})
+ extra['extra'] = extra.copy()
+ return msg, kwargs
+
+
+class JSONFormatter(logging.Formatter):
+ def __init__(self, fmt=None, datefmt=None):
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
+ # since logging.config.fileConfig passes it.
+ self.datefmt = datefmt
+
+ def formatException(self, ei, strip_newlines=True):
+ lines = traceback.format_exception(*ei)
+ if strip_newlines:
+ lines = [itertools.ifilter(
+ lambda x: x,
+ line.rstrip().splitlines()) for line in lines]
+ lines = list(itertools.chain(*lines))
+ return lines
+
+ def format(self, record):
+ message = {'message': record.getMessage(),
+ 'asctime': self.formatTime(record, self.datefmt),
+ 'name': record.name,
+ 'msg': record.msg,
+ 'args': record.args,
+ 'levelname': record.levelname,
+ 'levelno': record.levelno,
+ 'pathname': record.pathname,
+ 'filename': record.filename,
+ 'module': record.module,
+ 'lineno': record.lineno,
+ 'funcname': record.funcName,
+ 'created': record.created,
+ 'msecs': record.msecs,
+ 'relative_created': record.relativeCreated,
+ 'thread': record.thread,
+ 'thread_name': record.threadName,
+ 'process_name': record.processName,
+ 'process': record.process,
+ 'traceback': None}
+
+ if hasattr(record, 'extra'):
+ message['extra'] = record.extra
+
+ if record.exc_info:
+ message['traceback'] = self.formatException(record.exc_info)
+
+ return jsonutils.dumps(message)
+
+
+def _create_logging_excepthook(product_name):
+ def logging_excepthook(type, value, tb):
+ extra = {}
+ if CONF.verbose:
+ extra['exc_info'] = (type, value, tb)
+ getLogger(product_name).critical(str(value), **extra)
+ return logging_excepthook
+
+
+class LogConfigError(Exception):
+
+ message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+ def __init__(self, log_config, err_msg):
+ self.log_config = log_config
+ self.err_msg = err_msg
+
+ def __str__(self):
+ return self.message % dict(log_config=self.log_config,
+ err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+ try:
+ logging.config.fileConfig(log_config)
+ except moves.configparser.Error as exc:
+ raise LogConfigError(log_config, str(exc))
+
+
+def setup(product_name):
+ """Setup logging."""
+ if CONF.log_config:
+ _load_log_config(CONF.log_config)
+ else:
+ _setup_logging_from_conf()
+ sys.excepthook = _create_logging_excepthook(product_name)
+
+
+def set_defaults(logging_context_format_string):
+ cfg.set_defaults(log_opts,
+ logging_context_format_string=
+ logging_context_format_string)
+
+
+def _find_facility_from_conf():
+ facility_names = logging.handlers.SysLogHandler.facility_names
+ facility = getattr(logging.handlers.SysLogHandler,
+ CONF.syslog_log_facility,
+ None)
+
+ if facility is None and CONF.syslog_log_facility in facility_names:
+ facility = facility_names.get(CONF.syslog_log_facility)
+
+ if facility is None:
+ valid_facilities = facility_names.keys()
+ consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
+ 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
+ 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
+ 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
+ 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
+ valid_facilities.extend(consts)
+ raise TypeError(_('syslog facility must be one of: %s') %
+ ', '.join("'%s'" % fac
+ for fac in valid_facilities))
+
+ return facility
+
+
+def _setup_logging_from_conf():
+ log_root = getLogger(None).logger
+ for handler in log_root.handlers:
+ log_root.removeHandler(handler)
+
+ if CONF.use_syslog:
+ facility = _find_facility_from_conf()
+ syslog = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
+ log_root.addHandler(syslog)
+
+ logpath = _get_log_file_path()
+ if logpath:
+ filelog = logging.handlers.WatchedFileHandler(logpath)
+ log_root.addHandler(filelog)
+
+ if CONF.use_stderr:
+ streamlog = ColorHandler()
+ log_root.addHandler(streamlog)
+
+ elif not CONF.log_file:
+ # pass sys.stdout as a positional argument
+ # python2.6 calls the argument strm, in 2.7 it's stream
+ streamlog = logging.StreamHandler(sys.stdout)
+ log_root.addHandler(streamlog)
+
+ if CONF.publish_errors:
+ handler = importutils.import_object(
+ "tempest.openstack.common.log_handler.PublishErrorsHandler",
+ logging.ERROR)
+ log_root.addHandler(handler)
+
+ datefmt = CONF.log_date_format
+ for handler in log_root.handlers:
+ # NOTE(alaski): CONF.log_format overrides everything currently. This
+ # should be deprecated in favor of context aware formatting.
+ if CONF.log_format:
+ handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
+ datefmt=datefmt))
+ log_root.info('Deprecated: log_format is now deprecated and will '
+ 'be removed in the next release')
+ else:
+ handler.setFormatter(ContextFormatter(datefmt=datefmt))
+
+ if CONF.debug:
+ log_root.setLevel(logging.DEBUG)
+ elif CONF.verbose:
+ log_root.setLevel(logging.INFO)
+ else:
+ log_root.setLevel(logging.WARNING)
+
+ for pair in CONF.default_log_levels:
+ mod, _sep, level_name = pair.partition('=')
+ level = logging.getLevelName(level_name)
+ logger = logging.getLogger(mod)
+ logger.setLevel(level)
+
+_loggers = {}
+
+
+def getLogger(name='unknown', version='unknown'):
+ if name not in _loggers:
+ _loggers[name] = ContextAdapter(logging.getLogger(name),
+ name,
+ version)
+ return _loggers[name]
+
+
+def getLazyLogger(name='unknown', version='unknown'):
+ """Returns lazy logger.
+
+ Creates a pass-through logger that does not create the real logger
+ until it is really needed and delegates all calls to the real logger
+ once it is created.
+ """
+ return LazyAdapter(name, version)
+
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.INFO):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
+
+
+class ContextFormatter(logging.Formatter):
+ """A context.RequestContext aware formatter configured through flags.
+
+ The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default."""
+        # NOTE(sdague): default the fancier formatting params
+ # to an empty string so we don't throw an exception if
+ # they get used
+ for key in ('instance', 'color'):
+ if key not in record.__dict__:
+ record.__dict__[key] = ''
+
+ if record.__dict__.get('request_id', None):
+ self._fmt = CONF.logging_context_format_string
+ else:
+ self._fmt = CONF.logging_default_format_string
+
+ if (record.levelno == logging.DEBUG and
+ CONF.logging_debug_format_suffix):
+ self._fmt += " " + CONF.logging_debug_format_suffix
+
+        # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with CONF.logging_exception_prefix."""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+
+ stringbuffer = moves.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split('\n')
+ stringbuffer.close()
+
+ if CONF.logging_exception_prefix.find('%(asctime)') != -1:
+ record.asctime = self.formatTime(record, self.datefmt)
+
+ formatted_lines = []
+ for line in lines:
+ pl = CONF.logging_exception_prefix % record.__dict__
+ fl = '%s%s' % (pl, line)
+ formatted_lines.append(fl)
+ return '\n'.join(formatted_lines)
+
+
+class ColorHandler(logging.StreamHandler):
+ LEVEL_COLORS = {
+ logging.DEBUG: '\033[00;32m', # GREEN
+ logging.INFO: '\033[00;36m', # CYAN
+ logging.AUDIT: '\033[01;36m', # BOLD CYAN
+ logging.WARN: '\033[01;33m', # BOLD YELLOW
+ logging.ERROR: '\033[01;31m', # BOLD RED
+ logging.CRITICAL: '\033[01;31m', # BOLD RED
+ }
+
+ def format(self, record):
+ record.color = self.LEVEL_COLORS[record.levelno]
+ return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+ super(Exception, self).__init__(self.message % dict(msg=msg))
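
For reference, a minimal sketch of how callers use the new log module,
mirroring the import changes in the test files below; delete_server() is
illustrative::

    from tempest.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def delete_server(server_id):
        LOG.debug('Deleting server %s', server_id)
        LOG.audit('Server %s deleted', server_id)  # AUDIT level defined above
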
diff --git a/tempest/openstack/common/timeutils.py b/tempest/openstack/common/timeutils.py
new file mode 100644
index 0000000..bd60489
--- /dev/null
+++ b/tempest/openstack/common/timeutils.py
@@ -0,0 +1,188 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Time related utilities and helper functions.
+"""
+
+import calendar
+import datetime
+
+import iso8601
+import six
+
+
+# ISO 8601 extended time format with microseconds
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
+
+
+def isotime(at=None, subsecond=False):
+ """Stringify time in ISO 8601 format."""
+ if not at:
+ at = utcnow()
+ st = at.strftime(_ISO8601_TIME_FORMAT
+ if not subsecond
+ else _ISO8601_TIME_FORMAT_SUBSECOND)
+ tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+ st += ('Z' if tz == 'UTC' else tz)
+ return st
+
+
+def parse_isotime(timestr):
+ """Parse time from ISO 8601 format."""
+ try:
+ return iso8601.parse_date(timestr)
+ except iso8601.ParseError as e:
+ raise ValueError(e.message)
+ except TypeError as e:
+ raise ValueError(e.message)
+
+
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+ """Returns formatted utcnow."""
+ if not at:
+ at = utcnow()
+ return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+ """Turn a formatted time back into a datetime."""
+ return datetime.datetime.strptime(timestr, fmt)
+
+
+def normalize_time(timestamp):
+ """Normalize time in arbitrary timezone to UTC naive object."""
+ offset = timestamp.utcoffset()
+ if offset is None:
+ return timestamp
+ return timestamp.replace(tzinfo=None) - offset
+
+
+def is_older_than(before, seconds):
+ """Return True if before is older than seconds."""
+ if isinstance(before, six.string_types):
+ before = parse_strtime(before).replace(tzinfo=None)
+ return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
+def is_newer_than(after, seconds):
+ """Return True if after is newer than seconds."""
+ if isinstance(after, six.string_types):
+ after = parse_strtime(after).replace(tzinfo=None)
+ return after - utcnow() > datetime.timedelta(seconds=seconds)
+
+
+def utcnow_ts():
+ """Timestamp version of our utcnow function."""
+ return calendar.timegm(utcnow().timetuple())
+
+
+def utcnow():
+ """Overridable version of utils.utcnow."""
+ if utcnow.override_time:
+ try:
+ return utcnow.override_time.pop(0)
+ except AttributeError:
+ return utcnow.override_time
+ return datetime.datetime.utcnow()
+
+
+def iso8601_from_timestamp(timestamp):
+    """Returns an ISO 8601 formatted date from a timestamp."""
+ return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
+utcnow.override_time = None
+
+
+def set_time_override(override_time=datetime.datetime.utcnow()):
+ """Overrides utils.utcnow.
+
+ Make it return a constant time or a list thereof, one at a time.
+ """
+ utcnow.override_time = override_time
+
+
+def advance_time_delta(timedelta):
+ """Advance overridden time using a datetime.timedelta."""
+ assert(not utcnow.override_time is None)
+ try:
+ for dt in utcnow.override_time:
+ dt += timedelta
+ except TypeError:
+ utcnow.override_time += timedelta
+
+
+def advance_time_seconds(seconds):
+ """Advance overridden time by seconds."""
+ advance_time_delta(datetime.timedelta(0, seconds))
+
+
+def clear_time_override():
+ """Remove the overridden time."""
+ utcnow.override_time = None
+
+
+def marshall_now(now=None):
+ """Make an rpc-safe datetime with microseconds.
+
+ Note: tzinfo is stripped, but not required for relative times.
+ """
+ if not now:
+ now = utcnow()
+ return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+ minute=now.minute, second=now.second,
+ microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+ """Unmarshall a datetime dict."""
+ return datetime.datetime(day=tyme['day'],
+ month=tyme['month'],
+ year=tyme['year'],
+ hour=tyme['hour'],
+ minute=tyme['minute'],
+ second=tyme['second'],
+ microsecond=tyme['microsecond'])
+
+
+def delta_seconds(before, after):
+ """Return the difference between two timing objects.
+
+ Compute the difference in seconds between two date, time, or
+ datetime objects (as a float, to microsecond resolution).
+ """
+ delta = after - before
+ try:
+ return delta.total_seconds()
+ except AttributeError:
+ return ((delta.days * 24 * 3600) + delta.seconds +
+ float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+ """Determines if time is going to happen in the next window seconds.
+
+    :param dt: the time
+    :param window: minimum seconds to remain to consider the time not soon
+
+ :return: True if expiration is within the given duration
+ """
+ soon = (utcnow() + datetime.timedelta(seconds=window))
+ return normalize_time(dt) <= soon
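
For reference, a minimal sketch of the time-override helpers::

    import datetime

    from tempest.openstack.common import timeutils

    timeutils.set_time_override(datetime.datetime(2013, 7, 1))
    start = timeutils.utcnow()
    timeutils.advance_time_seconds(90)
    assert timeutils.delta_seconds(start, timeutils.utcnow()) == 90.0
    assert timeutils.isotime(start) == '2013-07-01T00:00:00Z'
    timeutils.clear_time_override()
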
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 8b24b2e..e785299 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,10 +29,10 @@
from tempest.api.network import common as net_common
-from tempest.common import log as logging
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
import tempest.manager
+from tempest.openstack.common import log as logging
import tempest.test
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 1f75e2f..39b1e10 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2097f50..13b31ec 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -15,10 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -104,7 +103,7 @@
def nova_list(self):
servers = self.compute_client.servers.list()
LOG.debug("server_list:%s" % servers)
- self.assertTrue(self.server in servers)
+ self.assertIn(self.server, servers)
def nova_show(self):
got_server = self.compute_client.servers.get(self.server)
@@ -124,7 +123,7 @@
def cinder_list(self):
volumes = self.volume_client.volumes.list()
- self.assertTrue(self.volume in volumes)
+ self.assertIn(self.volume, volumes)
def cinder_show(self):
volume = self.volume_client.volumes.get(self.volume.id)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 6202e91..8ee740e 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d318dd9..0ec3a1d 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 76fac82..6e305c1 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,10 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 89beb15..4434604 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -15,12 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-from cinderclient import exceptions
-from tempest.common import log as logging
+import time
+
+from cinderclient import exceptions as cinder_exceptions
+import testtools
+
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
-import time
+import tempest.test
LOG = logging.getLogger(__name__)
@@ -107,7 +112,7 @@
self.addCleanup(self.compute_client.security_group_rules.delete,
sg_rule.id)
- def _ssh_to_server(self, server_or_ip):
+ def _remote_client_to_server(self, server_or_ip):
if isinstance(server_or_ip, basestring):
ip = server_or_ip
else:
@@ -117,7 +122,10 @@
linux_client = RemoteClient(ip,
username,
pkey=self.keypair.private_key)
+ return linux_client
+ def _ssh_to_server(self, server_or_ip):
+ linux_client = self._remote_client_to_server(server_or_ip)
return linux_client.ssh_client
def _create_image(self, server):
@@ -142,7 +150,7 @@
try:
while volume_snapshots.get(snapshot.id):
time.sleep(1)
- except exceptions.NotFound:
+ except cinder_exceptions.NotFound:
pass
self.addCleanup(cleaner)
self._wait_for_volume_status(volume, 'available')
@@ -183,6 +191,20 @@
detach_volume_client(server.id, volume.id)
self._wait_for_volume_status(volume, 'available')
+    def _wait_for_volume_available_on_the_system(self, server_or_ip):
+ ssh = self._remote_client_to_server(server_or_ip)
+ conf = self.config
+
+ def _func():
+ part = ssh.get_partitions()
+ LOG.debug("Partitions:%s" % part)
+ return 'vdb' in part
+
+ if not tempest.test.call_until_true(_func,
+ conf.compute.build_timeout,
+ conf.compute.build_interval):
+ raise exceptions.TimeoutException
+
def _create_timestamp(self, server_or_ip):
ssh_client = self._ssh_to_server(server_or_ip)
ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
@@ -197,6 +219,7 @@
got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
+ @testtools.skip("Until Bug #1205344 is fixed")
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
@@ -215,6 +238,7 @@
ip_for_server = server
self._attach_volume(server, volume)
+        self._wait_for_volume_available_on_the_system(ip_for_server)
self._create_timestamp(ip_for_server)
self._detach_volume(server, volume)
@@ -242,6 +266,7 @@
# attach volume2 to instance2
self._attach_volume(server_from_snapshot, volume_from_snapshot)
+        self._wait_for_volume_available_on_the_system(ip_for_snapshot)
# check the existence of the timestamp file in the volume2
self._check_timestamp(ip_for_snapshot)
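
The new wait helper polls the guest until the attached volume shows up as a partition before the test formats and mounts it. A minimal, illustrative re-implementation of that polling pattern follows; the real helper is tempest.test.call_until_true, which (as the hunk above shows) takes a predicate, a total timeout and a polling interval, and 'vdb' is simply the device name the scenario expects.

    import time

    def call_until_true(func, duration, sleep_for):
        # Poll func every sleep_for seconds until it returns True or
        # duration seconds have elapsed; report whether it succeeded.
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False

    # Usage mirroring _wait_for_volume_available_on_the_system above:
    #   if not call_until_true(lambda: 'vdb' in ssh.get_partitions(),
    #                          conf.compute.build_timeout,
    #                          conf.compute.build_interval):
    #       raise exceptions.TimeoutException
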
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index ea8b0e0..12e7034 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -21,9 +21,9 @@
from lxml import etree
-from tempest.common import log as logging
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index dac77a2..bd48068 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -23,9 +23,9 @@
import urllib
from tempest.common import glance_http
-from tempest.common import log as logging
from tempest.common.rest_client import RestClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index 17f6cba..034b452 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -16,9 +16,9 @@
import time
import urllib
-from tempest.common import log as logging
from tempest.common.rest_client import RestClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index b35c43e..017ca95 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -17,9 +17,9 @@
from lxml import etree
-from tempest.common import log as logging
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import xml_to_json
diff --git a/tempest/test.py b/tempest/test.py
index 935bb72..6c304c3 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -24,11 +24,10 @@
import testtools
from tempest import clients
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
from tempest import config
from tempest import exceptions
-from tempest import manager
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -267,19 +266,22 @@
cls.resource_keys = {}
cls.os_resources = []
- def set_resource(self, key, thing):
+ @classmethod
+ def set_resource(cls, key, thing):
LOG.debug("Adding %r to shared resources of %s" %
- (thing, self.__class__.__name__))
- self.resource_keys[key] = thing
- self.os_resources.append(thing)
+ (thing, cls.__name__))
+ cls.resource_keys[key] = thing
+ cls.os_resources.append(thing)
- def get_resource(self, key):
- return self.resource_keys[key]
+ @classmethod
+ def get_resource(cls, key):
+ return cls.resource_keys[key]
- def remove_resource(self, key):
- thing = self.resource_keys[key]
- self.os_resources.remove(thing)
- del self.resource_keys[key]
+ @classmethod
+ def remove_resource(cls, key):
+ thing = cls.resource_keys[key]
+ cls.os_resources.remove(thing)
+ del cls.resource_keys[key]
def status_timeout(self, things, thing_id, expected_status):
"""
@@ -309,13 +311,3 @@
conf.compute.build_interval):
self.fail("Timed out waiting for thing %s to become %s"
% (thing_id, expected_status))
-
-
-class ComputeFuzzClientTest(TestCase):
-
- """
- Base test case class for OpenStack Compute API (Nova)
- that uses the Tempest REST fuzz client libs for calling the API.
- """
-
- manager_class = manager.ComputeFuzzClientManager
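
With set_resource/get_resource/remove_resource turned into classmethods, resources registered from setUpClass can be shared by every test in the class and looked up without an instance. A hedged usage sketch under that reading; the class name and the dict resource below are illustrative, not tempest API:

    import tempest.test


    class ExampleSharedResourceTest(tempest.test.TestCase):

        @classmethod
        def setUpClass(cls):
            super(ExampleSharedResourceTest, cls).setUpClass()
            # register a class-wide resource so every test can look it up
            cls.set_resource('marker', {'created_by': cls.__name__})

        def test_reads_shared_resource(self):
            marker = self.get_resource('marker')
            self.assertEqual('ExampleSharedResourceTest',
                             marker['created_by'])
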
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 9ff628c..ba627e3 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -28,10 +28,10 @@
import keystoneclient.exceptions
import tempest.clients
-from tempest.common import log as logging
from tempest.common.utils.file_utils import have_effective_read_access
import tempest.config
from tempest import exceptions
+from tempest.openstack.common import log as logging
import tempest.test
from tempest.thirdparty.boto.utils.wait import re_search_wait
from tempest.thirdparty.boto.utils.wait import state_wait
@@ -58,8 +58,9 @@
A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
boto_logger = logging.getLogger('boto')
- level = boto_logger.level
- boto_logger.setLevel(orig_logging.CRITICAL) # suppress logging for these
+ level = boto_logger.logger.level
+    # suppress boto logging for the credential/connectivity checks below
+    boto_logger.logger.setLevel(orig_logging.CRITICAL)
def _cred_sub_check(connection_data):
if not id_matcher.match(connection_data["aws_access_key_id"]):
@@ -99,7 +100,7 @@
except keystoneclient.exceptions.Unauthorized:
S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
" faild to get them even by keystoneclient"
- boto_logger.setLevel(level)
+ boto_logger.logger.setLevel(level)
return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
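
The extra ".logger" hop appears because the oslo-incubator log module hands back a context adapter rather than a bare stdlib logger, so the level has to be read and restored on the wrapped logger. A small sketch under that assumption:

    import logging as orig_logging

    from tempest.openstack.common import log as logging

    boto_logger = logging.getLogger('boto')   # adapter wrapping a Logger
    saved_level = boto_logger.logger.level
    boto_logger.logger.setLevel(orig_logging.CRITICAL)
    try:
        pass  # run the noisy credential/connectivity probes here
    finally:
        boto_logger.logger.setLevel(saved_level)
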
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 1201866..df2ff6a 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -19,10 +19,10 @@
import testtools
from tempest import clients
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.thirdparty.boto.test import BotoTestCase
from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
@@ -88,6 +88,53 @@
image["image_id"])
@attr(type='smoke')
+ def test_run_idempotent_instances(self):
+        # EC2 run_instances should be idempotent for a given client token
+
+ def _run_instance(client_token):
+ reservation = self.ec2_client.run_instances(
+ image_id=self.images["ami"]["image_id"],
+ kernel_id=self.images["aki"]["image_id"],
+ ramdisk_id=self.images["ari"]["image_id"],
+ instance_type=self.instance_type,
+ client_token=client_token)
+ rcuk = self.addResourceCleanUp(self.destroy_reservation,
+ reservation)
+ return (reservation, rcuk)
+
+ def _terminate_reservation(reservation, rcuk):
+ for instance in reservation.instances:
+ instance.terminate()
+ self.cancelResourceCleanUp(rcuk)
+
+ reservation_1, rcuk_1 = _run_instance('token_1')
+ reservation_2, rcuk_2 = _run_instance('token_2')
+ reservation_1a, rcuk_1a = _run_instance('token_1')
+
+ self.assertIsNotNone(reservation_1)
+ self.assertIsNotNone(reservation_2)
+ self.assertIsNotNone(reservation_1a)
+
+ # same reservation for token_1
+ self.assertEqual(reservation_1.id, reservation_1a.id)
+
+        # Cancel the cleanup for the duplicate reservation; it is
+        # already covered by rcuk_1
+ self.cancelResourceCleanUp(rcuk_1a)
+
+ _terminate_reservation(reservation_1, rcuk_1)
+ _terminate_reservation(reservation_2, rcuk_2)
+
+ reservation_3, rcuk_3 = _run_instance('token_1')
+ self.assertIsNotNone(reservation_3)
+
+ # make sure we don't get the old reservation back
+ self.assertNotEqual(reservation_1.id, reservation_3.id)
+
+ # clean up
+ _terminate_reservation(reservation_3, rcuk_3)
+
+ @attr(type='smoke')
def test_run_stop_terminate_instance(self):
# EC2 run, stop and terminate instance
image_ami = self.ec2_client.get_image(self.images["ami"]
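
The new idempotency test leans on the EC2 client-token contract: repeating run_instances with the same token must return the original reservation instead of booting another instance. A minimal sketch of that contract, assuming ec2_client is an already connected boto EC2 connection and the image and flavor values are placeholders:

    reservation_a = ec2_client.run_instances(image_id='ami-00000001',
                                             instance_type='m1.tiny',
                                             client_token='token_1')
    reservation_b = ec2_client.run_instances(image_id='ami-00000001',
                                             instance_type='m1.tiny',
                                             client_token='token_1')
    # the duplicate request is folded into the first reservation
    assert reservation_a.id == reservation_b.id
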
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index c90c586..dbb3104 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -16,7 +16,7 @@
# under the License.
from tempest import clients
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.thirdparty.boto.test import BotoTestCase
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 0f836d0..5e1e2cb 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -17,8 +17,6 @@
import os
-import testtools
-
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
@@ -107,7 +105,6 @@
self.images_client.get_all_images()))
self.cancelResourceCleanUp(image["cleanUp"])
- @testtools.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
def test_register_get_deregister_ari_image(self):
# Register and deregister ari image
image = {"name": rand_name("ari-name-"),
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
index a309a12..f8fa61b 100644
--- a/tempest/thirdparty/boto/utils/s3.py
+++ b/tempest/thirdparty/boto/utils/s3.py
@@ -22,7 +22,7 @@
import boto
import boto.s3.key
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
index 6b3ef27..d8fca3b 100644
--- a/tempest/thirdparty/boto/utils/wait.py
+++ b/tempest/thirdparty/boto/utils/wait.py
@@ -21,8 +21,8 @@
import boto.exception
from testtools import TestCase
-from tempest.common import log as logging
import tempest.config
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index 3bd057c..b2632f1 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -22,11 +22,11 @@
from sqlalchemy import create_engine, MetaData
-from tempest.common import log as logging
from tempest.common.ssh import Client
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
-from tempest import test
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
LOG = logging.getLogger(__name__)
@@ -47,7 +47,7 @@
pass
-class ComputeWhiteboxTest(test.ComputeFuzzClientTest, WhiteboxTest):
+class ComputeWhiteboxTest(manager.OfficialClientTest):
"""
Base smoke test case class for OpenStack Compute API (Nova)
@@ -64,15 +64,6 @@
cls.nova_dir = cls.config.whitebox.source_dir
cls.compute_bin_dir = cls.config.whitebox.bin_dir
cls.compute_config_path = cls.config.whitebox.config_path
- cls.servers_client = cls.manager.servers_client
- cls.images_client = cls.manager.images_client
- cls.flavors_client = cls.manager.flavors_client
- cls.extensions_client = cls.manager.extensions_client
- cls.floating_ips_client = cls.manager.floating_ips_client
- cls.keypairs_client = cls.manager.keypairs_client
- cls.security_groups_client = cls.manager.security_groups_client
- cls.limits_client = cls.manager.limits_client
- cls.volumes_client = cls.manager.volumes_client
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
@@ -80,38 +71,27 @@
cls.image_ref_alt = cls.config.compute.image_ref_alt
cls.flavor_ref = cls.config.compute.flavor_ref
cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
- cls.servers = []
+    # NOTE(afazekas): Mimics the helper method used in the API tests
@classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Tests often add things in a particular order
- # so we destroy resources in the reverse order in which resources
- # are added to the test class object
- if not cls.os_resources:
- return
- thing = cls.os_resources.pop()
- while True:
- LOG.debug("Deleting %r from shared resources of %s" %
- (thing, cls.__name__))
- # Resources in novaclient all have a delete() method
- # which destroys the resource...
- thing.delete()
- if not cls.os_resources:
- return
- thing = cls.os_resources.pop()
+    def create_server(cls, **kwargs):
+        flavor_ref = cls.config.compute.flavor_ref
+        image_ref = cls.config.compute.image_ref
+        name = kwargs.pop('name',
+                          rand_name(cls.__name__ + "-instance"))
+        wait_until = kwargs.pop('wait_until', None)
+        flavor = kwargs.pop('flavor', flavor_ref)
+        image_id = kwargs.pop('image_id', image_ref)
- @classmethod
- def create_server(cls, image_id=None):
- """Wrapper utility that returns a test server."""
- server_name = rand_name(cls.__name__ + "-instance")
- flavor = cls.flavor_ref
- if not image_id:
- image_id = cls.image_ref
+ server = cls.compute_client.servers.create(
+ name, image_id, flavor, **kwargs)
- resp, server = cls.servers_client.create_server(
- server_name, image_id, flavor)
- cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- cls.servers.append(server)
+        if wait_until:
+            cls.status_timeout(cls.compute_client.servers, server.id,
+                               wait_until)
+
+ server = cls.compute_client.servers.get(server.id)
+ cls.set_resource(name, server)
return server
@classmethod
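
A hedged usage sketch for the reworked create_server helper: name, flavor, image_id and wait_until are popped before the remaining kwargs are forwarded to the compute client, and wait_until is handed to status_timeout as the expected status. The subclass name and values below are illustrative only:

    class ExampleWhiteboxTest(ComputeWhiteboxTest):

        @classmethod
        def setUpClass(cls):
            super(ExampleWhiteboxTest, cls).setUpClass()
            # boots an instance, waits for ACTIVE, and registers it with
            # set_resource() for class-level tracking
            cls.server = cls.create_server(name='whitebox-instance',
                                           wait_until='ACTIVE')
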
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
index dc68336..0afb17e 100644
--- a/tempest/whitebox/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -15,23 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
from tempest.whitebox import manager
-#TODO(afazekas): The whitebox tests are using complex testclass/manager
-# hierarchy, without a real need. It is difficult to maintain.
-# They could share more code with scenario tests.
+from novaclient import exceptions
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest, base.BaseComputeTest):
+class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ImagesWhiteboxTest, cls).setUpClass()
- cls.client = cls.images_client
+ cls.create_image = cls.compute_client.servers.create_image
cls.connection, cls.meta = cls.get_db_handle_and_meta()
cls.shared_server = cls.create_server()
cls.image_ids = []
@@ -39,7 +35,6 @@
@classmethod
def tearDownClass(cls):
"""Delete images and server after a test is executed."""
- cls.servers_client.delete_server(cls.shared_server['id'])
for image_id in cls.image_ids:
cls.client.delete_image(image_id)
cls.image_ids.remove(image_id)
@@ -62,18 +57,18 @@
def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
"""Base method for create image tests based on vm and task states."""
try:
- self.update_state(self.shared_server['id'], vm_state,
+ self.update_state(self.shared_server.id, vm_state,
task_state, deleted)
image_name = rand_name('snap-')
- self.assertRaises(exceptions.Duplicate,
- self.client.create_image,
- self.shared_server['id'], image_name)
+ self.assertRaises(exceptions.Conflict,
+ self.create_image,
+ self.shared_server.id, image_name)
except Exception:
self.fail("Should not allow create image when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
finally:
- self.update_state(self.shared_server['id'], 'active', None)
+ self.update_state(self.shared_server.id, 'active', None)
def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
# 409 error when instance states are building,scheduling
diff --git a/tox.ini b/tox.ini
index eb1ef4b..93a53ac 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,7 +30,7 @@
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- sh tools/pretty_tox.sh 'tempest.api tempest.scenario tempest.thirdparty tempest.cli'
+ sh tools/pretty_tox.sh 'tempest.api tempest.scenario tempest.thirdparty tempest.cli {posargs}'
[testenv:smoke]
sitepackages = True