Merge "Add missing import of 'subunit' in test-requirements.txt"
diff --git a/HACKING.rst b/HACKING.rst
index 5153fe1..2fa949d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -122,3 +122,28 @@
- Consistently check the status code of responses in testcases. The
earlier a problem is detected the easier it is to debug, especially
where there is complicated setup required.
+
+Parallel Test Execution
+------------------------
+Tempest by default runs its tests in parallel. This creates the possibility of
+interesting interactions between tests which can cause unexpected failures.
+Tenant isolation provides protection from most of the potential race conditions
+between tests outside the same class. But there are still a few things to
+watch out for to try to avoid issues when running your tests in parallel.
+
+- Resources outside of a tenant scope still have the potential to conflict. This
+ is a larger concern for the admin tests since most resources and actions that
+  require admin privileges are outside of tenants.
+
+- Races between methods in the same class are not a problem because
+  parallelization in tempest is at the test class level, but if there are JSON
+  and XML versions of the same test class, there could still be a race between
+ methods.
+
+- The rand_name() function from tempest.common.utils.data_utils should be used
+ anywhere a resource is created with a name. Static naming should be avoided
+  to prevent resource conflicts (see the sketch after this list).
+
+- If the execution of a set of tests needs to be serialized, then locking can
+  be used to enforce this. See AggregatesAdminTest in
+  tempest.api.compute.admin for an example of using locking.
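+
+As a minimal sketch of the rand_name() guideline above (the create_server()
+call and its arguments are illustrative, not a specific Tempest client API)::
+
+    from tempest.common.utils.data_utils import rand_name
+
+    # A unique name per test run keeps parallel workers from colliding on a
+    # statically named resource.
+    name = rand_name('server-')
+    resp, server = self.client.create_server(name, image_ref, flavor_ref)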
diff --git a/README.rst b/README.rst
index f18628a..4161cc6 100644
--- a/README.rst
+++ b/README.rst
@@ -32,16 +32,18 @@
will have a configuration file already set up to work with your
devstack installation.
-Tempest is not tied to any single test runner, but Nose been the most commonly
+Tempest is not tied to any single test runner, but testr is the most commonly
used tool. After setting up your configuration file, you can execute
-the set of Tempest tests by using ``nosetests`` ::
+the set of Tempest tests by using ``testr`` ::
- $> nosetests tempest
+ $> testr run --parallel tempest
To run one single test ::
- $> nosetests -sv tempest.api.compute.servers.test_server_actions.py:
- ServerActionsTestJSON.test_rebuild_nonexistent_server
+ $> testr run --parallel tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_nonexistent_server
+
+Alternatively, you can use the run_tests.sh script, which will create a venv
+and run the tests, or use tox to do the same.
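+
+For example (a sketch; the script's options and the available tox environments
+depend on your checkout) ::
+
+    $> ./run_tests.sh
+
+    $> tox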
Configuration
-------------
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index cd57354..2ecace0 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -28,7 +28,8 @@
uri = http://127.0.0.1:5000/v2.0/
# URL for where to find the OpenStack V3 Identity API endpoint (Keystone)
uri_v3 = http://127.0.0.1:5000/v3/
-# The identity region
+# The identity region. Also used as the other services' region name unless
+# they are set explicitly.
region = RegionOne
# This should be the username of a user WITHOUT administrative privileges
@@ -137,6 +138,11 @@
# this value as "compute"
catalog_type = compute
+# The name of a region for compute. If empty or commented-out, the value of
+# identity.region is used instead. If no such region is found in the service
+# catalog, the first one found is used.
+#region = RegionOne
+
# Does the Compute API support creation of images?
create_image_enabled = true
@@ -186,6 +192,11 @@
# this value as "image"
catalog_type = image
+# The name of a region for image. If empty or commented-out, the value of
+# identity.region is used instead. If no such region is found in the service
+# catalog, the first one found is used.
+#region = RegionOne
+
# The version of the OpenStack Images API to use
api_version = 1
@@ -201,6 +212,11 @@
# Catalog type of the Neutron Service
catalog_type = network
+# The name of a region for network. If empty or commented-out, the value of
+# identity.region is used instead. If no such region is found in the service
+# catalog, the first one found is used.
+#region = RegionOne
+
# A large private cidr block from which to allocate smaller blocks for
# tenant networks.
tenant_network_cidr = 10.100.0.0/16
@@ -230,6 +246,10 @@
# Unless you have a custom Keystone service catalog implementation, you
# probably want to leave this value as "volume"
catalog_type = volume
+# The name of a region for volume. If empty or commented-out, the value of
+# identity.region is used instead. If no such region is found in the service
+# catalog, the first one found is used.
+#region = RegionOne
# The disk format to use when copying a volume to image
disk_format = raw
# Number of seconds to wait while looping to check the status of a
@@ -260,6 +280,11 @@
# this value as "object-store"
catalog_type = object-store
+# The name of a region for object storage. If empty or commented-out, the
+# value of identity.region is used instead. If no such region is found in
+# the service catalog, the first one found is used.
+#region = RegionOne
+
# Number of seconds to time on waiting for a container to container
# synchronization complete
container_sync_timeout = 120
@@ -318,6 +343,16 @@
build_interval = 1
[orchestration]
+# The type of endpoint for an Orchestration API service. Unless you have a
+# custom Keystone service catalog implementation, you probably want to leave
+# this value as "orchestration"
+catalog_type = orchestration
+
+# The name of a region for orchestration. If empty or commented-out, the value
+# of identity.region is used instead. If no such region is found in the service
+# catalog, the first one found is used.
+#region = RegionOne
+
# Status change wait interval
build_interval = 1
diff --git a/requirements.txt b/requirements.txt
index ab48ec5..b15fb92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,6 @@
testrepository>=0.0.17
oslo.config>=1.1.0
eventlet>=0.13.0
+six<1.4.0
+iso8601>=0.1.4
+fixtures>=0.3.14
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 0bb0460..a5dceca 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -61,12 +61,12 @@
# Create and delete an aggregate.
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
- self.assertEquals(200, resp.status)
- self.assertEquals(aggregate_name, aggregate['name'])
- self.assertEquals(None, aggregate['availability_zone'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(aggregate_name, aggregate['name'])
+ self.assertEqual(None, aggregate['availability_zone'])
resp, _ = self.client.delete_aggregate(aggregate['id'])
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
self.client.wait_for_resource_deletion(aggregate['id'])
@attr(type='gate')
@@ -75,12 +75,12 @@
aggregate_name = rand_name(self.aggregate_name_prefix)
az_name = rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name, az_name)
- self.assertEquals(200, resp.status)
- self.assertEquals(aggregate_name, aggregate['name'])
- self.assertEquals(az_name, aggregate['availability_zone'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(aggregate_name, aggregate['name'])
+ self.assertEqual(az_name, aggregate['availability_zone'])
resp, _ = self.client.delete_aggregate(aggregate['id'])
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
self.client.wait_for_resource_deletion(aggregate['id'])
@attr(type='gate')
@@ -91,7 +91,7 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, aggregates = self.client.list_aggregates()
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
self.assertIn((aggregate['id'], aggregate['availability_zone']),
map(lambda x: (x['id'], x['availability_zone']),
aggregates))
@@ -104,10 +104,10 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, body = self.client.get_aggregate(aggregate['id'])
- self.assertEquals(200, resp.status)
- self.assertEquals(aggregate['name'], body['name'])
- self.assertEquals(aggregate['availability_zone'],
- body['availability_zone'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(aggregate['name'], body['name'])
+ self.assertEqual(aggregate['availability_zone'],
+ body['availability_zone'])
@attr(type=['negative', 'gate'])
def test_aggregate_create_as_user(self):
@@ -166,17 +166,17 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
resp, body = self.client.add_host(aggregate['id'], self.host)
- self.assertEquals(200, resp.status)
- self.assertEquals(aggregate_name, body['name'])
- self.assertEquals(aggregate['availability_zone'],
- body['availability_zone'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(aggregate_name, body['name'])
+ self.assertEqual(aggregate['availability_zone'],
+ body['availability_zone'])
self.assertIn(self.host, body['hosts'])
resp, body = self.client.remove_host(aggregate['id'], self.host)
- self.assertEquals(200, resp.status)
- self.assertEquals(aggregate_name, body['name'])
- self.assertEquals(aggregate['availability_zone'],
- body['availability_zone'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(aggregate_name, body['name'])
+ self.assertEqual(aggregate['availability_zone'],
+ body['availability_zone'])
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
@@ -191,10 +191,10 @@
resp, aggregates = self.client.list_aggregates()
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
- self.assertEquals(1, len(aggs))
+ self.assertEqual(1, len(aggs))
agg = aggs[0]
- self.assertEquals(aggregate_name, agg['name'])
- self.assertEquals(None, agg['availability_zone'])
+ self.assertEqual(aggregate_name, agg['name'])
+ self.assertEqual(None, agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
@@ -208,8 +208,8 @@
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
resp, body = self.client.get_aggregate(aggregate['id'])
- self.assertEquals(aggregate_name, body['name'])
- self.assertEquals(None, body['availability_zone'])
+ self.assertEqual(aggregate_name, body['name'])
+ self.assertEqual(None, body['availability_zone'])
self.assertIn(self.host, body['hosts'])
@attr(type='gate')
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 8a56b89..d1e1be6 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -51,7 +51,7 @@
@attr(type='gate')
def test_get_availability_zone_list_with_non_admin_user(self):
- # List of availability zone with non admin user
+        # List of availability zones with a non-administrator user
resp, availability_zone = \
self.non_adm_client.get_availability_zone_list()
self.assertEqual(200, resp.status)
@@ -59,7 +59,8 @@
@attr(type=['negative', 'gate'])
def test_get_availability_zone_list_detail_with_non_admin_user(self):
- # List of availability zones and available services with non admin user
+ # List of availability zones and available services with
+ # non-administrator user
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_availability_zone_list_detail)
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 895f773..85b03e6 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -21,42 +21,29 @@
from tempest.test import attr
-class FixedIPsBase(base.BaseComputeAdminTest):
- _interface = 'json'
- ip = None
-
- @classmethod
- def setUpClass(cls):
- super(FixedIPsBase, cls).setUpClass()
- if cls.config.service_available.neutron:
- msg = ("%s skipped as neutron is available" % cls.__name__)
- raise cls.skipException(msg)
- # NOTE(maurosr): The idea here is: the server creation is just an
- # auxiliary element to the ip details or reservation, there was no way
- # (at least none in my mind) to get an valid and existing ip except
- # by creating a server and using its ip. So the intention is to create
- # fewer server possible (one) and use it to both: json and xml tests.
- # This decreased time to run both tests, in my test machine, from 53
- # secs to 29 (agains 23 secs when running only json tests)
- if cls.ip is None:
- cls.client = cls.os_adm.fixed_ips_client
- cls.non_admin_client = cls.fixed_ips_client
- resp, server = cls.create_server(wait_until='ACTIVE')
- resp, server = cls.servers_client.get_server(server['id'])
- for ip_set in server['addresses']:
- for ip in server['addresses'][ip_set]:
- if ip['OS-EXT-IPS:type'] == 'fixed':
- cls.ip = ip['addr']
- break
- if cls.ip:
- break
-
-
-class FixedIPsTestJson(FixedIPsBase):
+class FixedIPsTestJson(base.BaseComputeAdminTest):
_interface = 'json'
CONF = config.TempestConfig()
+ @classmethod
+ def setUpClass(cls):
+ super(FixedIPsTestJson, cls).setUpClass()
+ if cls.config.service_available.neutron:
+ msg = ("%s skipped as neutron is available" % cls.__name__)
+ raise cls.skipException(msg)
+ cls.client = cls.os_adm.fixed_ips_client
+ cls.non_admin_client = cls.fixed_ips_client
+ resp, server = cls.create_server(wait_until='ACTIVE')
+ resp, server = cls.servers_client.get_server(server['id'])
+ for ip_set in server['addresses']:
+ for ip in server['addresses'][ip_set]:
+ if ip['OS-EXT-IPS:type'] == 'fixed':
+ cls.ip = ip['addr']
+ break
+ if cls.ip:
+ break
+
@attr(type='gate')
def test_list_fixed_ip_details(self):
resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 7efd3c1..b693227 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -136,8 +136,8 @@
# Delete the flavor
new_flavor_id = flavor['id']
resp_delete, body = self.client.delete_flavor(new_flavor_id)
- self.assertEquals(200, resp.status)
- self.assertEquals(202, resp_delete.status)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(202, resp_delete.status)
# Deleted flavors can be seen via detailed GET
resp, flavor = self.client.get_flavor_details(new_flavor_id)
@@ -230,6 +230,26 @@
self.assertFalse(flag)
@attr(type='gate')
+ def test_create_server_with_non_public_flavor(self):
+ # Create a flavor with os-flavor-access:is_public false
+ flavor_name = rand_name(self.flavor_name_prefix)
+ new_flavor_id = rand_int_id(start=1000)
+
+ # Create the flavor
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id,
+ is_public="False")
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.assertEqual(200, resp.status)
+
+        # Verify the flavor cannot be used by another user
+ self.assertRaises(exceptions.BadRequest,
+ self.os.servers_client.create_server,
+ 'test', self.image_ref, flavor['id'])
+
+ @attr(type='gate')
def test_list_public_flavor_with_other_user(self):
# Create a Flavor with public access.
# Try to List/Get flavor with another user
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index ace77a6..e1e75cb 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -102,7 +102,7 @@
self.flavor['id'])
self.assertEqual(resp.status, 200)
for key in specs:
- self.assertEquals(body[key], specs[key])
+ self.assertEqual(body[key], specs[key])
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_unset_keys(self):
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 51ce20c..c3ba671 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -52,7 +52,7 @@
@attr(type=['negative', 'gate'])
def test_get_non_existant_flavor(self):
- # flavor details are not returned for non existant flavors
+ # flavor details are not returned for non-existent flavors
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
999)
@@ -150,7 +150,7 @@
@attr(type=['negative', 'gate'])
def test_get_flavor_details_for_invalid_flavor_id(self):
- # Ensure 404 returned for non-existant flavor ID
+ # Ensure 404 returned for non-existent flavor ID
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
9999)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index e380334..f5baa3c 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -80,12 +80,12 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_floating_ip_details(self):
# Negative test:Should not be able to GET the details
- # of nonexistant floating IP
+ # of non-existent floating IP
floating_ip_id = []
resp, body = self.client.list_floating_ips()
for i in range(len(body)):
floating_ip_id.append(body[i]['id'])
- # Creating a nonexistant floatingIP id
+ # Creating a non-existent floatingIP id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in floating_ip_id:
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 52239cd..a769744 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -120,20 +120,20 @@
@attr(type=['negative', 'gate'])
def test_update_nonexistant_image_metadata(self):
- # Negative test:An update should not happen for a nonexistant image
+ # Negative test:An update should not happen for a non-existent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
self.assertRaises(exceptions.NotFound,
self.client.update_image_metadata, 999, meta)
@attr(type=['negative', 'gate'])
def test_get_nonexistant_image_metadata_item(self):
- # Negative test: Get on nonexistant image should not happen
+ # Negative test: Get on non-existent image should not happen
self.assertRaises(exceptions.NotFound,
self.client.get_image_metadata_item, 999, 'key2')
@attr(type=['negative', 'gate'])
def test_set_nonexistant_image_metadata(self):
- # Negative test: Metadata should not be set to a nonexistant image
+ # Negative test: Metadata should not be set to a non-existent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
self.assertRaises(exceptions.NotFound, self.client.set_image_metadata,
999, meta)
@@ -149,8 +149,8 @@
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_image_metadata_item(self):
- # Negative test: Shouldnt be able to delete metadata
- # item from nonexistant image
+ # Negative test: Shouldn't be able to delete metadata
+ # item from non-existent image
self.assertRaises(exceptions.NotFound,
self.client.delete_image_metadata_item, 999, 'key1')
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index a80f456..e700278 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -230,7 +230,7 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_image(self):
- # Negative test: GET on non existant image should fail
+ # Negative test: GET on non-existent image should fail
self.assertRaises(exceptions.NotFound, self.client.get_image, 999)
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 083fbd7..78c547a 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -157,7 +157,7 @@
k_name = rand_name('keypair-')
resp, _ = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
- # Now try the same keyname to ceate another key
+ # Now try the same keyname to create another key
self.assertRaises(exceptions.Duplicate, self.client.create_keypair,
k_name)
resp, _ = self.client.delete_keypair(k_name)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 472b8b4..6071e54 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -35,7 +35,7 @@
@attr(type='gate')
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
- # should be successfull
+ # should be successful
# Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -59,7 +59,7 @@
def test_security_group_rules_create_with_optional_arguments(self):
# Positive test: Creation of Security Group rule
# with optional arguments
- # should be successfull
+ # should be successful
secgroup1 = None
secgroup2 = None
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 30db206..3e459a2 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -112,12 +112,12 @@
@attr(type=['negative', 'gate'])
def test_security_group_get_nonexistant_group(self):
# Negative test:Should not be able to GET the details
- # of nonexistant Security Group
+ # of non-existent Security Group
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- # Creating a nonexistant Security Group id
+ # Creating a non-existent Security Group id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
@@ -201,12 +201,12 @@
"Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_security_group(self):
- # Negative test:Deletion of a nonexistant Security Group should Fail
+ # Negative test:Deletion of a non-existent Security Group should Fail
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- # Creating Non Existant Security Group
+ # Creating non-existent Security Group
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 45de0d6..9997b97 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -115,7 +115,7 @@
@attr(type='gate')
def test_get_server_metadata_item(self):
- # The value for a specic metadata key should be returned
+ # The value for a specific metadata key should be returned
resp, meta = self.client.get_server_metadata_item(self.server_id,
'key2')
self.assertTrue('value2', meta['key2'])
@@ -148,13 +148,13 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_server_metadata_item(self):
- # Negative test: GET on nonexistant server should not succeed
+ # Negative test: GET on a non-existent server should not succeed
self.assertRaises(exceptions.NotFound,
self.client.get_server_metadata_item, 999, 'test2')
@attr(type=['negative', 'gate'])
def test_list_nonexistant_server_metadata(self):
- # Negative test:List metadata on a non existant server should
+ # Negative test:List metadata on a non-existent server should
# not succeed
self.assertRaises(exceptions.NotFound,
self.client.list_server_metadata, 999)
@@ -171,7 +171,7 @@
@attr(type=['negative', 'gate'])
def test_set_nonexistant_server_metadata(self):
- # Negative test: Set metadata on a non existant server should not
+ # Negative test: Set metadata on a non-existent server should not
# succeed
meta = {'meta1': 'data1'}
self.assertRaises(exceptions.NotFound,
@@ -179,7 +179,7 @@
@attr(type=['negative', 'gate'])
def test_update_nonexistant_server_metadata(self):
- # Negative test: An update should not happen for a nonexistant image
+ # Negative test: An update should not happen for a non-existent image
meta = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(exceptions.NotFound,
self.client.update_server_metadata, 999, meta)
@@ -195,7 +195,7 @@
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_server_metadata_item(self):
# Negative test: Should not be able to delete metadata item from a
- # nonexistant server
+ # non-existent server
# Delete the metadata item
self.assertRaises(exceptions.NotFound,
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 3ff2538..1dff806 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -81,7 +81,7 @@
# Update the server with a new name
resp, server = self.client.update_server(server['id'],
name='newname')
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
# Verify the name of the server has changed
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index e09a23f..226c40e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -154,7 +154,7 @@
@attr(type=['negative', 'gate'])
def test_create_with_non_existant_keypair(self):
- # Pass a non existant keypair while creating a server
+ # Pass a non-existent keypair while creating a server
key_name = rand_name('key')
self.assertRaises(exceptions.BadRequest,
diff --git a/tempest/api/compute/test_live_block_migration.py b/tempest/api/compute/test_live_block_migration.py
index 84fd653..7c60859 100644
--- a/tempest/api/compute/test_live_block_migration.py
+++ b/tempest/api/compute/test_live_block_migration.py
@@ -110,7 +110,7 @@
target_host = self._get_host_other_than(actual_host)
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
- self.assertEquals(target_host, self._get_host_for_server(server_id))
+ self.assertEqual(target_host, self._get_host_for_server(server_id))
@testtools.skipIf(not CONF.compute.live_migration_available,
'Live migration not available')
@@ -122,7 +122,7 @@
self.assertRaises(exceptions.BadRequest, self._migrate_server_to,
server_id, target_host)
- self.assertEquals('ACTIVE', self._get_server_status(server_id))
+ self.assertEqual('ACTIVE', self._get_server_status(server_id))
@testtools.skipIf(not CONF.compute.live_migration_available or
not CONF.compute.use_block_migration_for_live_migration,
@@ -153,7 +153,7 @@
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
- self.assertEquals(target_host, self._get_host_for_server(server_id))
+ self.assertEqual(target_host, self._get_host_for_server(server_id))
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index cc112cc..c234efd 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -56,7 +56,7 @@
@attr(type='gate')
def test_list_roles_by_unauthorized_user(self):
- # Non admin user should not be able to list roles
+ # Non-administrator user should not be able to list roles
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_roles)
@@ -116,7 +116,8 @@
@attr(type='gate')
def test_assign_user_role_by_unauthorized_user(self):
- # Non admin user should not be authorized to assign a role to user
+ # Non-administrator user should not be authorized to
+ # assign a role to user
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.assign_user_role,
@@ -170,11 +171,12 @@
user['id'], role['id'])
resp, body = self.client.remove_user_role(tenant['id'], user['id'],
user_role['id'])
- self.assertEquals(resp['status'], '204')
+ self.assertEqual(resp['status'], '204')
@attr(type='gate')
def test_remove_user_role_by_unauthorized_user(self):
- # Non admin user should not be authorized to remove a user's role
+ # Non-administrator user should not be authorized to
+ # remove a user's role
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -237,7 +239,8 @@
@attr(type='gate')
def test_list_user_roles_by_unauthorized_user(self):
- # Non admin user should not be authorized to list a user's roles
+ # Non-administrator user should not be authorized to list
+ # a user's roles
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 2be0c29..508c177 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -50,7 +50,7 @@
self.assertTrue(resp['status'].startswith('2'))
# verifying the existence of service created
self.assertIn('id', fetched_service)
- self.assertEquals(fetched_service['id'], service_data['id'])
+ self.assertEqual(fetched_service['id'], service_data['id'])
self.assertIn('name', fetched_service)
self.assertEqual(fetched_service['name'], service_data['name'])
self.assertIn('type', fetched_service)
diff --git a/tempest/api/identity/admin/test_tenants.py b/tempest/api/identity/admin/test_tenants.py
index e8625db..a61a115 100644
--- a/tempest/api/identity/admin/test_tenants.py
+++ b/tempest/api/identity/admin/test_tenants.py
@@ -26,7 +26,7 @@
@attr(type='gate')
def test_list_tenants_by_unauthorized_user(self):
- # Non-admin user should not be able to list tenants
+ # Non-administrator user should not be able to list tenants
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_tenants)
@@ -63,7 +63,7 @@
@attr(type='gate')
def test_tenant_delete_by_unauthorized_user(self):
- # Non-admin user should not be able to delete a tenant
+ # Non-administrator user should not be able to delete a tenant
tenant_name = rand_name('tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
self.data.tenants.append(tenant)
@@ -164,7 +164,7 @@
@attr(type='gate')
def test_create_tenant_by_unauthorized_user(self):
- # Non-admin user should not be authorized to create a tenant
+ # Non-administrator user should not be authorized to create a tenant
tenant_name = rand_name('tenant-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_tenant, tenant_name)
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 4cfeb45..057e633 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -48,7 +48,7 @@
@attr(type=['negative', 'gate'])
def test_create_user_by_unauthorized_user(self):
- # Non-admin should not be authorized to create a user
+ # Non-administrator should not be authorized to create a user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_user, self.alt_user,
@@ -109,13 +109,13 @@
resp, user = self.client.create_user(alt_user2, self.alt_password,
self.data.tenant['id'],
self.alt_email)
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
resp, body = self.client.delete_user(user['id'])
- self.assertEquals('204', resp['status'])
+ self.assertEqual('204', resp['status'])
@attr(type=['negative', 'gate'])
def test_delete_users_by_unauthorized_user(self):
- # Non admin user should not be authorized to delete a user
+ # Non-administrator user should not be authorized to delete a user
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_user,
@@ -213,7 +213,7 @@
@attr(type=['negative', 'gate'])
def test_get_users_by_unauthorized_user(self):
- # Non admin user should not be authorized to get user list
+ # Non-administrator user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_users)
@@ -236,7 +236,7 @@
resp, user1 = self.client.create_user(alt_tenant_user1, 'password1',
self.data.tenant['id'],
'user1@123')
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
user_ids.append(user1['id'])
self.data.users.append(user1)
@@ -244,7 +244,7 @@
resp, user2 = self.client.create_user(alt_tenant_user2, 'password2',
self.data.tenant['id'],
'user2@123')
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
user_ids.append(user2['id'])
self.data.users.append(user2)
# List of users for the respective tenant ID
@@ -273,22 +273,22 @@
user_ids.append(user['id'])
resp, role = self.client.assign_user_role(tenant['id'], user['id'],
role['id'])
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
alt_user2 = rand_name('second_user_')
resp, second_user = self.client.create_user(alt_user2, 'password1',
self.data.tenant['id'],
'user2@123')
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
user_ids.append(second_user['id'])
self.data.users.append(second_user)
resp, role = self.client.assign_user_role(tenant['id'],
second_user['id'],
role['id'])
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
# List of users with roles for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
- self.assertEquals('200', resp['status'])
+ self.assertEqual('200', resp['status'])
for i in body:
fetched_user_ids.append(i['id'])
# verifying the user Id in the list
@@ -301,7 +301,7 @@
@attr(type=['negative', 'gate'])
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
- # users for a nonexistant tenant
+ # users for a non-existent tenant
# Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index efd2f83..cda5863 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -112,8 +112,7 @@
if c not in fetched_cred_ids]
self.assertEqual(0, len(missing_creds),
"Failed to find cred %s in fetched list" %
- ', '.join(m_cred for m_cred
- in missing_creds))
+ ', '.join(m_cred for m_cred in missing_creds))
class CredentialsTestXML(CredentialsTestJSON):
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 9136934..2fbef77 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -25,7 +25,7 @@
_interface = 'json'
def _delete_domain(self, domain_id):
- # It is necessary to disable the domian before deleting,
+ # It is necessary to disable the domain before deleting,
# or else it would result in unauthorized error
_, body = self.v3_client.update_domain(domain_id, enabled=False)
resp, _ = self.v3_client.delete_domain(domain_id)
@@ -39,7 +39,7 @@
for _ in range(3):
_, domain = self.v3_client.create_domain(
rand_name('domain-'), description=rand_name('domain-desc-'))
- # Delete the domian at the end of this method
+ # Delete the domain at the end of this method
self.addCleanup(self._delete_domain, domain['id'])
domain_ids.append(domain['id'])
# List and Verify Domains
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 9d143ed..02a6f5b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -59,7 +59,7 @@
def test_list_endpoints(self):
# Get a list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
- # Asserting LIST Endpoint
+ # Asserting LIST endpoints
self.assertEqual(resp['status'], '200')
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 980323a..a238c46 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -54,7 +54,7 @@
resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
- # NOTE(harika-vakadi): It is necessary to disable the domian
+ # NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting,or else it would result in unauthorized error
cls.v3_client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index bf7a554..50e9702 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -116,7 +116,7 @@
self.assertEqual(0, len(missing_projects),
"Failed to find project %s in fetched list" %
', '.join(m_project for m_project
- in missing_projects))
+ in missing_projects))
class UsersV3TestXML(UsersV3TestJSON):
diff --git a/tempest/api/image/v1/test_image_members.py b/tempest/api/image/v1/test_image_members.py
index 29bcaf4..fcbde50 100644
--- a/tempest/api/image/v1/test_image_members.py
+++ b/tempest/api/image/v1/test_image_members.py
@@ -42,7 +42,7 @@
disk_format='raw',
is_public=True,
data=image_file)
- self.assertEquals(201, resp.status)
+ self.assertEqual(201, resp.status)
image_id = image['id']
return image_id
@@ -50,9 +50,9 @@
def test_add_image_member(self):
image = self._create_image()
resp = self.client.add_member(self.tenants[0], image)
- self.assertEquals(204, resp.status)
+ self.assertEqual(204, resp.status)
resp, body = self.client.get_image_membership(image)
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
members = body['members']
members = map(lambda x: x['member_id'], members)
self.assertIn(self.tenants[0], members)
@@ -61,12 +61,12 @@
def test_get_shared_images(self):
image = self._create_image()
resp = self.client.add_member(self.tenants[0], image)
- self.assertEquals(204, resp.status)
+ self.assertEqual(204, resp.status)
share_image = self._create_image()
resp = self.client.add_member(self.tenants[0], share_image)
- self.assertEquals(204, resp.status)
+ self.assertEqual(204, resp.status)
resp, body = self.client.get_shared_images(self.tenants[0])
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
images = body['shared_images']
images = map(lambda x: x['image_id'], images)
self.assertIn(share_image, images)
@@ -76,10 +76,10 @@
def test_remove_member(self):
image_id = self._create_image()
resp = self.client.add_member(self.tenants[0], image_id)
- self.assertEquals(204, resp.status)
+ self.assertEqual(204, resp.status)
resp = self.client.delete_member(self.tenants[0], image_id)
- self.assertEquals(204, resp.status)
+ self.assertEqual(204, resp.status)
resp, body = self.client.get_image_membership(image_id)
- self.assertEquals(200, resp.status)
+ self.assertEqual(200, resp.status)
members = body['members']
- self.assertEquals(0, len(members))
+ self.assertEqual(0, len(members))
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 19c5f84..4a4bf60 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -56,9 +56,21 @@
cls.networks = []
cls.subnets = []
cls.ports = []
+ cls.pools = []
+ cls.vips = []
+ cls.members = []
+ cls.health_monitors = []
@classmethod
def tearDownClass(cls):
+ for health_monitor in cls.health_monitors:
+ cls.client.delete_health_monitor(health_monitor['id'])
+ for member in cls.members:
+ cls.client.delete_member(member['id'])
+ for vip in cls.vips:
+ cls.client.delete_vip(vip['id'])
+ for pool in cls.pools:
+ cls.client.delete_pool(pool['id'])
for port in cls.ports:
cls.client.delete_port(port['id'])
for subnet in cls.subnets:
@@ -111,3 +123,41 @@
port = body['port']
cls.ports.append(port)
return port
+
+ @classmethod
+ def create_pool(cls, name, lb_method, protocol, subnet):
+ """Wrapper utility that returns a test pool."""
+ resp, body = cls.client.create_pool(name, lb_method, protocol,
+ subnet['id'])
+ pool = body['pool']
+ cls.pools.append(pool)
+ return pool
+
+ @classmethod
+ def create_vip(cls, name, protocol, protocol_port, subnet, pool):
+ """Wrapper utility that returns a test vip."""
+ resp, body = cls.client.create_vip(name, protocol, protocol_port,
+ subnet['id'], pool['id'])
+ vip = body['vip']
+ cls.vips.append(vip)
+ return vip
+
+ @classmethod
+ def create_member(cls, protocol_port, pool):
+ """Wrapper utility that returns a test member."""
+ resp, body = cls.client.create_member("10.0.9.46",
+ protocol_port,
+ pool['id'])
+ member = body['member']
+ cls.members.append(member)
+ return member
+
+ @classmethod
+ def create_health_monitor(cls, delay, max_retries, Type, timeout):
+ """Wrapper utility that returns a test health monitor."""
+ resp, body = cls.client.create_health_monitor(delay,
+ max_retries,
+ Type, timeout)
+ health_monitor = body['health_monitor']
+ cls.health_monitors.append(health_monitor)
+ return health_monitor
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
new file mode 100644
index 0000000..62017dc
--- /dev/null
+++ b/tempest/api/network/test_load_balancer.py
@@ -0,0 +1,210 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class LoadBalancerJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ create vIP, and Pool
+ show vIP
+ list vIP
+ update vIP
+ delete vIP
+ update pool
+ delete pool
+ show pool
+ list pool
+ health monitoring operations
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(LoadBalancerJSON, cls).setUpClass()
+ cls.network = cls.create_network()
+ cls.name = cls.network['name']
+ cls.subnet = cls.create_subnet(cls.network)
+ pool_name = rand_name('pool-')
+ vip_name = rand_name('vip-')
+ cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
+ "HTTP", cls.subnet)
+ cls.vip = cls.create_vip(vip_name, "HTTP", 80, cls.subnet, cls.pool)
+ cls.member = cls.create_member(80, cls.pool)
+ cls.health_monitor = cls.create_health_monitor(4, 3, "TCP", 1)
+
+ @attr(type='smoke')
+ def test_list_vips(self):
+ # Verify the vIP exists in the list of all vIPs
+ resp, body = self.client.list_vips()
+ self.assertEqual('200', resp['status'])
+ vips = body['vips']
+ found = None
+ for n in vips:
+ if (n['id'] == self.vip['id']):
+ found = n['id']
+ msg = "vIPs list doesn't contain created vip"
+ self.assertIsNotNone(found, msg)
+
+ def test_create_update_delete_pool_vip(self):
+ # Creates a vip
+ name = rand_name('vip-')
+ resp, body = self.client.create_pool(rand_name("pool-"),
+ "ROUND_ROBIN", "HTTP",
+ self.subnet['id'])
+ pool = body['pool']
+ resp, body = self.client.create_vip(name, "HTTP", 80,
+ self.subnet['id'], pool['id'])
+ self.assertEqual('201', resp['status'])
+ vip = body['vip']
+ vip_id = vip['id']
+ # Verification of vip update
+ new_name = "New_vip"
+ resp, body = self.client.update_vip(vip_id, new_name)
+ self.assertEqual('200', resp['status'])
+ updated_vip = body['vip']
+ self.assertEqual(updated_vip['name'], new_name)
+ # Verification of vip delete
+ resp, body = self.client.delete_vip(vip['id'])
+ self.assertEqual('204', resp['status'])
+ # Verification of pool update
+ new_name = "New_pool"
+ resp, body = self.client.update_pool(pool['id'], new_name)
+ self.assertEqual('200', resp['status'])
+ updated_pool = body['pool']
+ self.assertEqual(updated_pool['name'], new_name)
+ # Verification of pool delete
+ resp, body = self.client.delete_pool(pool['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='smoke')
+ def test_show_vip(self):
+ # Verifies the details of a vip
+ resp, body = self.client.show_vip(self.vip['id'])
+ self.assertEqual('200', resp['status'])
+ vip = body['vip']
+ self.assertEqual(self.vip['id'], vip['id'])
+ self.assertEqual(self.vip['name'], vip['name'])
+
+ @attr(type='smoke')
+ def test_show_pool(self):
+ # Verifies the details of a pool
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ pool = body['pool']
+ self.assertEqual(self.pool['id'], pool['id'])
+ self.assertEqual(self.pool['name'], pool['name'])
+
+ @attr(type='smoke')
+ def test_list_pools(self):
+ # Verify the pool exists in the list of all pools
+ resp, body = self.client.list_pools()
+ self.assertEqual('200', resp['status'])
+ pools = body['pools']
+ self.assertIn(self.pool['id'], [p['id'] for p in pools])
+
+ @attr(type='smoke')
+ def test_list_members(self):
+ # Verify the member exists in the list of all members
+ resp, body = self.client.list_members()
+ self.assertEqual('200', resp['status'])
+ members = body['members']
+ self.assertIn(self.member['id'], [m['id'] for m in members])
+
+ @attr(type='smoke')
+ def test_create_update_delete_member(self):
+ # Creates a member
+ resp, body = self.client.create_member("10.0.9.46", 80,
+ self.pool['id'])
+ self.assertEqual('201', resp['status'])
+ member = body['member']
+ # Verification of member update
+ admin_state = [False, 'False']
+ resp, body = self.client.update_member(admin_state[0], member['id'])
+ self.assertEqual('200', resp['status'])
+ updated_member = body['member']
+ self.assertIn(updated_member['admin_state_up'], admin_state)
+ # Verification of member delete
+ resp, body = self.client.delete_member(member['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='smoke')
+ def test_show_member(self):
+ # Verifies the details of a member
+ resp, body = self.client.show_member(self.member['id'])
+ self.assertEqual('200', resp['status'])
+ member = body['member']
+ self.assertEqual(self.member['id'], member['id'])
+ self.assertEqual(self.member['admin_state_up'],
+ member['admin_state_up'])
+
+ @attr(type='smoke')
+ def test_list_health_monitors(self):
+ # Verify the health monitor exists in the list of all health monitors
+ resp, body = self.client.list_health_monitors()
+ self.assertEqual('200', resp['status'])
+ health_monitors = body['health_monitors']
+ self.assertIn(self.health_monitor['id'],
+ [h['id'] for h in health_monitors])
+
+ @attr(type='smoke')
+ def test_create_update_delete_health_monitor(self):
+ # Creates a health_monitor
+ resp, body = self.client.create_health_monitor(4, 3, "TCP", 1)
+ self.assertEqual('201', resp['status'])
+ health_monitor = body['health_monitor']
+ # Verification of health_monitor update
+ admin_state = [False, 'False']
+ resp, body = self.client.update_health_monitor(admin_state[0],
+ health_monitor['id'])
+ self.assertEqual('200', resp['status'])
+ updated_health_monitor = body['health_monitor']
+ self.assertIn(updated_health_monitor['admin_state_up'], admin_state)
+ # Verification of health_monitor delete
+ resp, body = self.client.delete_health_monitor(health_monitor['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='smoke')
+ def test_show_health_monitor(self):
+ # Verifies the details of a health_monitor
+ resp, body = self.client.show_health_monitor(self.health_monitor['id'])
+ self.assertEqual('200', resp['status'])
+ health_monitor = body['health_monitor']
+ self.assertEqual(self.health_monitor['id'], health_monitor['id'])
+ self.assertEqual(self.health_monitor['admin_state_up'],
+ health_monitor['admin_state_up'])
+
+ @attr(type='smoke')
+ def test_associate_disassociate_health_monitor_with_pool(self):
+ # Verify that a health monitor can be associated with a pool
+ resp, body = (self.client.associate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('201', resp['status'])
+ # Verify that a health monitor can be disassociated from a pool
+ resp, body = (self.client.disassociate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('204', resp['status'])
+
+
+class LoadBalancerXML(LoadBalancerJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 66ca05f..a2b4ab3 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -58,49 +58,11 @@
def setUpClass(cls):
super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
- cls.network1 = cls.create_network()
- cls.network2 = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
- def _delete_networks(self, created_networks):
- for n in created_networks:
- resp, body = self.client.delete_network(n['id'])
- self.assertEqual(204, resp.status)
- # Asserting that the networks are not found in the list after deletion
- resp, body = self.client.list_networks()
- networks_list = list()
- for network in body['networks']:
- networks_list.append(network['id'])
- for n in created_networks:
- self.assertNotIn(n['id'], networks_list)
-
- def _delete_subnets(self, created_subnets):
- for n in created_subnets:
- resp, body = self.client.delete_subnet(n['id'])
- self.assertEqual(204, resp.status)
- # Asserting that the subnets are not found in the list after deletion
- resp, body = self.client.list_subnets()
- subnets_list = list()
- for subnet in body['subnets']:
- subnets_list.append(subnet['id'])
- for n in created_subnets:
- self.assertNotIn(n['id'], subnets_list)
-
- def _delete_ports(self, created_ports):
- for n in created_ports:
- resp, body = self.client.delete_port(n['id'])
- self.assertEqual(204, resp.status)
- # Asserting that the ports are not found in the list after deletion
- resp, body = self.client.list_ports()
- ports_list = list()
- for port in body['ports']:
- ports_list.append(port['id'])
- for n in created_ports:
- self.assertNotIn(n['id'], ports_list)
-
@attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Creates a network
@@ -234,6 +196,81 @@
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_port(self):
+ non_exist_id = rand_name('port')
+ self.assertRaises(exceptions.NotFound, self.client.show_port,
+ non_exist_id)
+
+
+class NetworksTestXML(NetworksTestJSON):
+ _interface = 'xml'
+
+
+class BulkNetworkOpsJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ bulk network creation
+ bulk subnet creation
+    bulk port creation
+ list tenant's networks
+
+ v2.0 of the Neutron API is assumed. It is also assumed that the following
+ options are defined in the [network] section of etc/tempest.conf:
+
+    tenant_network_cidr with a block of CIDRs from which smaller blocks
+ can be allocated for tenant networks
+
+ tenant_network_mask_bits with the mask bits to be used to partition the
+    block defined by tenant_network_cidr
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(BulkNetworkOpsJSON, cls).setUpClass()
+ cls.network1 = cls.create_network()
+ cls.network2 = cls.create_network()
+
+ def _delete_networks(self, created_networks):
+ for n in created_networks:
+ resp, body = self.client.delete_network(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the networks are not found in the list after deletion
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertNotIn(n['id'], networks_list)
+
+ def _delete_subnets(self, created_subnets):
+ for n in created_subnets:
+ resp, body = self.client.delete_subnet(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the subnets are not found in the list after deletion
+ resp, body = self.client.list_subnets()
+ subnets_list = list()
+ for subnet in body['subnets']:
+ subnets_list.append(subnet['id'])
+ for n in created_subnets:
+ self.assertNotIn(n['id'], subnets_list)
+
+ def _delete_ports(self, created_ports):
+ for n in created_ports:
+ resp, body = self.client.delete_port(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the ports are not found in the list after deletion
+ resp, body = self.client.list_ports()
+ ports_list = list()
+ for port in body['ports']:
+ ports_list.append(port['id'])
+ for n in created_ports:
+ self.assertNotIn(n['id'], ports_list)
+
@attr(type='smoke')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
@@ -320,5 +357,5 @@
self.assertIn(n['id'], ports_list)
-class NetworksTestXML(NetworksTestJSON):
+class BulkNetworkOpsXML(BulkNetworkOpsJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 9f8c742..8b939fe 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -97,7 +97,7 @@
name = rand_name('router-')
resp, create_body = self.client.create_router(name)
self.addCleanup(self.client.delete_router, create_body['router']['id'])
- # Add router interafce with subnet id
+ # Add router interface with subnet id
resp, interface = self.client.add_router_interface_with_subnet_id(
create_body['router']['id'], subnet['id'])
self.assertEqual('200', resp['status'])
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 24f8286..60ca88a 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -31,7 +31,7 @@
def _delete_security_group(self, secgroup_id):
resp, _ = self.client.delete_security_group(secgroup_id)
self.assertEqual(204, resp.status)
- # Asserting that the secgroup is not found in the list
+ # Asserting that the security group is not found in the list
# after deletion
resp, list_body = self.client.list_security_groups()
self.assertEqual('200', resp['status'])
@@ -43,7 +43,7 @@
def _delete_security_group_rule(self, rule_id):
resp, _ = self.client.delete_security_group_rule(rule_id)
self.assertEqual(204, resp.status)
- # Asserting that the secgroup is not found in the list
+ # Asserting that the security group is not found in the list
# after deletion
resp, list_body = self.client.list_security_group_rules()
self.assertEqual('200', resp['status'])
@@ -88,7 +88,7 @@
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
self.assertIn(group_create_body['security_group']['id'], secgroup_list)
- # No Udpate in security group
+ # No Update in security group
# Create rule
resp, rule_create_body = self.client.create_security_group_rule(
group_create_body['security_group']['id']
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index b443933..d4201ee 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -59,7 +59,7 @@
params = {'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEquals(len(container_list), limit)
+ self.assertEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_containers_with_marker(self):
@@ -70,11 +70,11 @@
params = {'marker': self.containers[-1]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEquals(len(container_list), 0)
+ self.assertEqual(len(container_list), 0)
params = {'marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEquals(len(container_list), self.containers_count / 2 - 1)
+ self.assertEqual(len(container_list), self.containers_count / 2 - 1)
@attr(type='smoke')
def test_list_containers_with_end_marker(self):
@@ -85,11 +85,11 @@
params = {'end_marker': self.containers[0]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEquals(len(container_list), 0)
+ self.assertEqual(len(container_list), 0)
params = {'end_marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEquals(len(container_list), self.containers_count / 2)
+ self.assertEqual(len(container_list), self.containers_count / 2)
@attr(type='smoke')
def test_list_containers_with_limit_and_marker(self):
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index c934020..174c82a 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -147,7 +147,7 @@
@attr(type='slow')
def test_created_network(self):
- """Verifies created netowrk."""
+ """Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
resp, body = self.network_client.show_network(network_id)
self.assertEqual('200', resp['status'])
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index ffe8def..41849d0 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -158,7 +158,7 @@
resp, body = self.client.get_resource(sid, rid)
self.assertEqual('CREATE_COMPLETE', body['resource_status'])
- # fetch the ip address from servers client, since we can't get it
+ # fetch the IP address from servers client, since we can't get it
# from the stack until stack create is complete
resp, server = self.servers_client.get_server(
body['physical_resource_id'])
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index f7f428c..12b03b5 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -69,6 +69,10 @@
fetched_volume['metadata'],
'The fetched Volume is different '
'from the created Volume')
+ if 'imageRef' in kwargs:
+ self.assertEqual(fetched_volume['bootable'], True)
+ if 'imageRef' not in kwargs:
+ self.assertEqual(fetched_volume['bootable'], False)
@attr(type='gate')
def test_volume_get_metadata_none(self):
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 8c39e08..d9c9e48 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -17,8 +17,11 @@
from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
+LOG = logging.getLogger(__name__)
+
class VolumesListTest(base.BaseVolumeTest):
@@ -27,7 +30,7 @@
ensure that the backing file for the volume group that Nova uses
has space for at least 3 1G volumes!
If you are running a Devstack environment, ensure that the
- VOLUME_BACKING_FILE_SIZE is atleast 4G in your localrc
+ VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
"""
_interface = 'json'
@@ -64,22 +67,17 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except Exception:
+ except Exception as exc:
+ LOG.exception(exc)
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
- # too small. So, here, we clean up whatever we did manage
- # to create and raise a SkipTest
+ # too small.
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
- msg = ("Failed to create ALL necessary volumes to run "
- "test. This typically means that the backing file "
- "size of the nova-volumes group is too small to "
- "create the 3 volumes needed by this test case")
- raise cls.skipException(msg)
- raise
+ raise exc
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index e2b15a4..014ab32 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -31,8 +31,8 @@
@attr(type='gate')
def test_volume_get_nonexistant_volume_id(self):
- # Should not be able to get a nonexistant volume
- # Creating a nonexistant volume id
+ # Should not be able to get a non-existent volume
+ # Creating a non-existent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -41,14 +41,14 @@
non_exist_id = rand_name('999')
if non_exist_id not in volume_id_list:
break
- # Trying to Get a non existant volume
+ # Trying to Get a non-existent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
@attr(type='gate')
def test_volume_delete_nonexistant_volume_id(self):
- # Should not be able to delete a nonexistant Volume
- # Creating nonexistant volume id
+ # Should not be able to delete a non-existent Volume
+ # Creating non-existent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -57,7 +57,7 @@
non_exist_id = '12345678-abcd-4321-abcd-123456789098'
if non_exist_id not in volume_id_list:
break
- # Try to Delete a non existant volume
+ # Try to delete a non-existent volume
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
non_exist_id)
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index cbb8d08..b082b1e 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -95,9 +95,11 @@
"""Executes given command with auth attributes appended."""
# TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
- '--os-auth-url %s ' % (self.identity.admin_username,
- self.identity.admin_tenant_name, self.identity.admin_password,
- self.identity.uri))
+ '--os-auth-url %s ' %
+ (self.identity.admin_username,
+ self.identity.admin_tenant_name,
+ self.identity.admin_password,
+ self.identity.uri))
flags = creds + ' ' + flags
return self.cmd(cmd, action, flags, params, fail_ok)
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index f22ec4e..bb3368f 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -158,7 +158,7 @@
def _table_columns(first_table_row):
"""Find column ranges in output line.
- Return list of touples (start,end) for each column
+ Return list of tuples (start,end) for each column
detected by plus (+) characters in delimiter line.
"""
positions = []
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 21acae8..3ff997a 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -114,4 +114,7 @@
self.cinder('list', flags='--retries 3')
def test_cinder_region_list(self):
- self.cinder('list', flags='--os-region-name ' + self.identity.region)
+ region = self.config.volume.region
+ if not region:
+ region = self.config.identity.region
+ self.cinder('list', flags='--os-region-name ' + region)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 8dfff6e..81393a9 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -49,7 +49,17 @@
self.service = None
self.token = None
self.base_url = None
- self.region = {'compute': self.config.identity.region}
+ self.region = {}
+ for cfgname in dir(self.config):
+ # Find all config.FOO.catalog_type and assume FOO is a service.
+ cfg = getattr(self.config, cfgname)
+ catalog_type = getattr(cfg, 'catalog_type', None)
+ if not catalog_type:
+ continue
+ service_region = getattr(cfg, 'region', None)
+ if not service_region:
+ service_region = self.config.identity.region
+ self.region[catalog_type] = service_region
self.endpoint_url = 'publicURL'
self.headers = {'Content-Type': 'application/%s' % self.TYPE,
'Accept': 'application/%s' % self.TYPE}
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 2cbb74d..0d0e794 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -52,12 +52,12 @@
return self.ssh_client.test_connection_auth()
def hostname_equals_servername(self, expected_hostname):
- # Get hostname using command "hostname"
+ # Get host name using command "hostname"
actual_hostname = self.ssh_client.exec_command("hostname").rstrip()
return expected_hostname == actual_hostname
def get_files(self, path):
- # Return a list of comma seperated files
+ # Return a list of comma separated files
command = "ls -m " + path
return self.ssh_client.exec_command(command).rstrip('\n').split(', ')
diff --git a/tempest/config.py b/tempest/config.py
index b386968..1b52f5e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -44,7 +44,10 @@
help='Full URI of the OpenStack Identity API (Keystone), v3'),
cfg.StrOpt('region',
default='RegionOne',
- help="The identity region name to use."),
+ help="The identity region name to use. Also used as the other "
+ "services' region name unless they are set explicitly. "
+ "If no such region is found in the service catalog, the "
+ "first found one is used."),
cfg.StrOpt('username',
default='demo',
help="Username to use for Nova API requests."),
@@ -200,6 +203,12 @@
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
+ cfg.StrOpt('region',
+ default='',
+ help="The compute region name to use. If empty, the value "
+ "of identity.region is used instead. If no such region "
+ "is found in the service catalog, the first found one is "
+ "used."),
cfg.StrOpt('path_to_private_key',
default=None,
help="Path to a private key file for SSH access to remote "
@@ -255,6 +264,12 @@
cfg.StrOpt('catalog_type',
default='image',
help='Catalog type of the Image service.'),
+ cfg.StrOpt('region',
+ default='',
+ help="The image region name to use. If empty, the value "
+ "of identity.region is used instead. If no such region "
+ "is found in the service catalog, the first found one is "
+ "used."),
cfg.StrOpt('http_image',
default='http://download.cirros-cloud.net/0.3.1/'
'cirros-0.3.1-x86_64-uec.tar.gz',
@@ -275,6 +290,12 @@
cfg.StrOpt('catalog_type',
default='network',
help='Catalog type of the Neutron service.'),
+ cfg.StrOpt('region',
+ default='',
+ help="The network region name to use. If empty, the value "
+ "of identity.region is used instead. If no such region "
+ "is found in the service catalog, the first found one is "
+ "used."),
cfg.StrOpt('tenant_network_cidr',
default="10.100.0.0/16",
help="The cidr block to allocate tenant networks from"),
@@ -315,6 +336,12 @@
cfg.StrOpt('catalog_type',
default='Volume',
help="Catalog type of the Volume Service"),
+ cfg.StrOpt('region',
+ default='',
+ help="The volume region name to use. If empty, the value "
+ "of identity.region is used instead. If no such region "
+ "is found in the service catalog, the first found one is "
+ "used."),
cfg.BoolOpt('multi_backend_enabled',
default=False,
help="Runs Cinder multi-backend test (requires 2 backends)"),
@@ -349,6 +376,12 @@
cfg.StrOpt('catalog_type',
default='object-store',
help="Catalog type of the Object-Storage service."),
+ cfg.StrOpt('region',
+ default='',
+ help="The object-storage region name to use. If empty, the "
+ "value of identity.region is used instead. If no such "
+ "region is found in the service catalog, the first found "
+ "one is used."),
cfg.StrOpt('container_sync_timeout',
default=120,
help="Number of seconds to time on waiting for a container"
@@ -380,6 +413,12 @@
cfg.StrOpt('catalog_type',
default='orchestration',
help="Catalog type of the Orchestration service."),
+ cfg.StrOpt('region',
+ default='',
+ help="The orchestration region name to use. If empty, the "
+ "value of identity.region is used instead. If no such "
+ "region is found in the service catalog, the first found "
+ "one is used."),
cfg.BoolOpt('allow_tenant_isolation',
default=False,
help="Allows test cases to create/destroy tenants and "
diff --git a/tempest/openstack/common/excutils.py b/tempest/openstack/common/excutils.py
index 81aad14..db37660 100644
--- a/tempest/openstack/common/excutils.py
+++ b/tempest/openstack/common/excutils.py
@@ -24,6 +24,8 @@
import time
import traceback
+import six
+
from tempest.openstack.common.gettextutils import _ # noqa
@@ -65,7 +67,7 @@
self.tb))
return False
if self.reraise:
- raise self.type_, self.value, self.tb
+ six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
@@ -77,7 +79,8 @@
try:
return infunc(*args, **kwargs)
except Exception as exc:
- if exc.message == last_exc_message:
+ this_exc_message = unicode(exc)
+ if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
@@ -85,12 +88,12 @@
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
- exc.message != last_exc_message):
+ this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
- last_exc_message = exc.message
+ last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
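six.reraise() is the Python 2/3-compatible replacement for the three-expression raise that save_and_reraise_exception used before. A small hedged sketch of the same idiom in isolation::

    import sys

    import six

    def reraise_current():
        # Re-raise the exception currently being handled, keeping its
        # original traceback on both Python 2 and Python 3.
        exc_type, exc_value, exc_tb = sys.exc_info()
        six.reraise(exc_type, exc_value, exc_tb)

    try:
        try:
            raise ValueError('boom')
        except ValueError:
            reraise_current()
    except ValueError as exc:
        print(exc)  # prints: boom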
diff --git a/tempest/openstack/common/fileutils.py b/tempest/openstack/common/fileutils.py
index d2e3d3e..6cf68ba 100644
--- a/tempest/openstack/common/fileutils.py
+++ b/tempest/openstack/common/fileutils.py
@@ -69,33 +69,34 @@
return (reloaded, cache_info['data'])
-def delete_if_exists(path):
+def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
+ :param remove: Optional function to remove passed path
"""
try:
- os.unlink(path)
+ remove(path)
except OSError as e:
- if e.errno == errno.ENOENT:
- return
- else:
+ if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
-def remove_path_on_error(path):
+def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
+ :param remove: Optional function to remove passed path
"""
+
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
- delete_if_exists(path)
+ remove(path)
def file_open(*args, **kwargs):
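The new remove parameters let callers swap in a cleanup callable other than os.unlink. One possible use, assuming the patched fileutils module above (the path and the rmtree-based remover are purely illustrative)::

    import functools
    import os
    import shutil

    from tempest.openstack.common import fileutils

    # os.unlink cannot remove a directory, so pass an rmtree-based remover.
    remove_tree = functools.partial(shutil.rmtree, ignore_errors=True)

    workdir = '/tmp/scratch-dir'  # illustrative path
    os.mkdir(workdir)
    with fileutils.remove_path_on_error(workdir, remove=remove_tree):
        pass  # any exception raised here removes the whole directory tree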
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
index 8594937..cbf570a 100644
--- a/tempest/openstack/common/gettextutils.py
+++ b/tempest/openstack/common/gettextutils.py
@@ -1,8 +1,8 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
# Copyright 2013 IBM Corp.
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -26,22 +26,44 @@
import copy
import gettext
-import logging.handlers
+import logging
import os
import re
-import UserString
+try:
+ import UserString as _userString
+except ImportError:
+ import collections as _userString
+from babel import localedata
import six
_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+_AVAILABLE_LANGUAGES = {}
+USE_LAZY = False
+
+
+def enable_lazy():
+ """Convenience function for configuring _() to use lazy gettext
+
+ Call this at the start of execution to enable the gettextutils._
+ function to use lazy gettext functionality. This is useful if
+ your project is importing _ directly instead of using the
+ gettextutils.install() way of importing the _ function.
+ """
+ global USE_LAZY
+ USE_LAZY = True
+
def _(msg):
- return _t.ugettext(msg)
+ if USE_LAZY:
+ return Message(msg, 'tempest')
+ else:
+ return _t.ugettext(msg)
-def install(domain):
+def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
@@ -51,44 +73,48 @@
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
+
+ :param domain: the translation domain
+ :param lazy: indicates whether or not to install the lazy _() function.
+ The lazy _() introduces a way to do deferred translation
+ of messages by installing a _ that builds Message objects,
+ instead of strings, which can then be lazily translated into
+ any available locale.
"""
- gettext.install(domain,
- localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
- unicode=True)
+ if lazy:
+ # NOTE(mrodden): Lazy gettext functionality.
+ #
+ # The following introduces a deferred way to do translations on
+ # messages in OpenStack. We override the standard _() function
+ # and % (format string) operation to build Message objects that can
+ # later be translated when we have more information.
+ #
+ # Also included below is an example LocaleHandler that translates
+ # Messages to an associated locale, effectively allowing many logs,
+ # each with their own locale.
+
+ def _lazy_gettext(msg):
+ """Create and return a Message object.
+
+ Lazy gettext function for a given domain, it is a factory method
+ for a project/module to get a lazy gettext function for its own
+ translation domain (i.e. nova, glance, cinder, etc.)
+
+ Message encapsulates a string so that we can translate
+ it later when needed.
+ """
+ return Message(msg, domain)
+
+ import __builtin__
+ __builtin__.__dict__['_'] = _lazy_gettext
+ else:
+ localedir = '%s_LOCALEDIR' % domain.upper()
+ gettext.install(domain,
+ localedir=os.environ.get(localedir),
+ unicode=True)
-"""
-Lazy gettext functionality.
-
-The following is an attempt to introduce a deferred way
-to do translations on messages in OpenStack. We attempt to
-override the standard _() function and % (format string) operation
-to build Message objects that can later be translated when we have
-more information. Also included is an example LogHandler that
-translates Messages to an associated locale, effectively allowing
-many logs, each with their own locale.
-"""
-
-
-def get_lazy_gettext(domain):
- """Assemble and return a lazy gettext function for a given domain.
-
- Factory method for a project/module to get a lazy gettext function
- for its own translation domain (i.e. nova, glance, cinder, etc.)
- """
-
- def _lazy_gettext(msg):
- """Create and return a Message object.
-
- Message encapsulates a string so that we can translate it later when
- needed.
- """
- return Message(msg, domain)
-
- return _lazy_gettext
-
-
-class Message(UserString.UserString, object):
+class Message(_userString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
@@ -130,7 +156,7 @@
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
- keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+ keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
@@ -229,7 +255,47 @@
if name in ops:
return getattr(self.data, name)
else:
- return UserString.UserString.__getattribute__(self, name)
+ return _userString.UserString.__getattribute__(self, name)
+
+
+def get_available_languages(domain):
+ """Lists the available languages for the given translation domain.
+
+ :param domain: the domain to get languages for
+ """
+ if domain in _AVAILABLE_LANGUAGES:
+ return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+ localedir = '%s_LOCALEDIR' % domain.upper()
+ find = lambda x: gettext.find(domain,
+ localedir=os.environ.get(localedir),
+ languages=[x])
+
+ # NOTE(mrodden): en_US should always be available (and first in case
+ # order matters) since our in-line message strings are en_US
+ language_list = ['en_US']
+ # NOTE(luisg): Babel <1.0 used a function called list(), which was
+ # renamed to locale_identifiers() in >=1.0, the requirements master list
+ # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+ # this check when the master list updates to >=1.0, and all projects update
+ list_identifiers = (getattr(localedata, 'list', None) or
+ getattr(localedata, 'locale_identifiers'))
+ locale_identifiers = list_identifiers()
+ for i in locale_identifiers:
+ if find(i) is not None:
+ language_list.append(i)
+ _AVAILABLE_LANGUAGES[domain] = language_list
+ return copy.copy(language_list)
+
+
+def get_localized_message(message, user_locale):
+ """Gets a localized version of the given message in the given locale."""
+ if isinstance(message, Message):
+ if user_locale:
+ message.locale = user_locale
+ return unicode(message)
+ else:
+ return message
class LocaleHandler(logging.Handler):
diff --git a/tempest/openstack/common/jsonutils.py b/tempest/openstack/common/jsonutils.py
index bd43e59..c568a06 100644
--- a/tempest/openstack/common/jsonutils.py
+++ b/tempest/openstack/common/jsonutils.py
@@ -38,14 +38,18 @@
import inspect
import itertools
import json
-import types
-import xmlrpclib
+try:
+ import xmlrpclib
+except ImportError:
+ # NOTE(jd): xmlrpclib is not shipped with Python 3
+ xmlrpclib = None
-import netaddr
import six
+from tempest.openstack.common import importutils
from tempest.openstack.common import timeutils
+netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
@@ -53,7 +57,8 @@
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+ + (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -125,7 +130,7 @@
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
- if isinstance(value, xmlrpclib.DateTime):
+ if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
@@ -138,7 +143,7 @@
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
- elif isinstance(value, netaddr.IPAddress):
+ elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
diff --git a/tempest/openstack/common/local.py b/tempest/openstack/common/local.py
index f1bfc82..e82f17d 100644
--- a/tempest/openstack/common/local.py
+++ b/tempest/openstack/common/local.py
@@ -15,16 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""
+import threading
import weakref
-from eventlet import corolocal
-
-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
def __getattribute__(self, attr):
- rval = corolocal.local.__getattribute__(self, attr)
+ rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
@@ -34,7 +33,7 @@
def __setattr__(self, attr, value):
value = weakref.ref(value)
- return corolocal.local.__setattr__(self, attr, value)
+ return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
@@ -45,4 +44,4 @@
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
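Storage now rides on threading.local, but values are still held only through weak references, so anything kept solely in the store can disappear once its last strong reference goes away. A brief sketch (the Context class is a placeholder)::

    from tempest.openstack.common import local

    class Context(object):
        """Stand-in for any weak-referenceable object."""
        pass

    ctx = Context()
    local.weak_store.context = ctx          # stored as weakref.ref(ctx)
    assert local.weak_store.context is ctx  # dereferenced on access

    del ctx
    # Only the weak reference is left; on CPython the referent is collected
    # right away and the next access returns None.
    print(local.weak_store.context)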
diff --git a/tempest/openstack/common/lockutils.py b/tempest/openstack/common/lockutils.py
index 3ff1a7a..0abd1a7 100644
--- a/tempest/openstack/common/lockutils.py
+++ b/tempest/openstack/common/lockutils.py
@@ -20,10 +20,10 @@
import errno
import functools
import os
+import threading
import time
import weakref
-from eventlet import semaphore
from oslo.config import cfg
from tempest.openstack.common import fileutils
@@ -137,7 +137,8 @@
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
- This function yields a `semaphore.Semaphore` instance unless external is
+ This function yields a `threading.Semaphore` instance (if we don't use
+ eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
@@ -155,7 +156,7 @@
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
- sem = _semaphores.get(name, semaphore.Semaphore())
+ sem = _semaphores.get(name, threading.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
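lock() stays a context manager; it now yields a threading.Semaphore for in-process serialization and, with external=True, an InterProcessLock backed by a lock file. A usage sketch (the lock name, prefix and path are made up)::

    from tempest.openstack.common import lockutils

    # In-process lock: serializes callers within this Python process.
    with lockutils.lock('my-resource'):
        pass  # critical section

    # External lock: a file-based lock that also serializes callers running
    # in separate processes; lock_path must point at a writable directory.
    with lockutils.lock('my-resource', lock_file_prefix='tempest-',
                        external=True, lock_path='/tmp'):
        pass  # critical section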
diff --git a/tempest/openstack/common/timeutils.py b/tempest/openstack/common/timeutils.py
index bd60489..60f02bc 100644
--- a/tempest/openstack/common/timeutils.py
+++ b/tempest/openstack/common/timeutils.py
@@ -21,6 +21,7 @@
import calendar
import datetime
+import time
import iso8601
import six
@@ -49,9 +50,9 @@
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
- raise ValueError(e.message)
+ raise ValueError(unicode(e))
except TypeError as e:
- raise ValueError(e.message)
+ raise ValueError(unicode(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -90,6 +91,11 @@
def utcnow_ts():
"""Timestamp version of our utcnow function."""
+ if utcnow.override_time is None:
+ # NOTE(kgriffs): This is several times faster
+ # than going through calendar.timegm(...)
+ return int(time.time())
+
return calendar.timegm(utcnow().timetuple())
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 21c37b9..9137b93 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -425,7 +425,7 @@
self.addCleanup(image_client.images.delete, image_id)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
- self.assertEquals(name, snapshot_image.name)
+ self.assertEqual(name, snapshot_image.name)
LOG.debug("Created snapshot image %s for server %s",
snapshot_image.name, server.name)
return snapshot_image
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 88f2ebd..1a4d802 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -85,7 +85,7 @@
def server_count():
# the number of servers is the number of resources
- # in the nexted stack
+ # in the nested stack
self.server_count = len(
self.client.resources.list(nested_stack_id))
return self.server_count
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 662e919..9d7086c 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -62,7 +62,7 @@
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
- faciliate external connectivity to a potentially unroutable
+ facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index cf72cd4..9c50489 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import services
@@ -49,18 +48,7 @@
@services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
- self.instance = self.compute_client.servers.create(
- i_name, base_image_id, flavor_id)
- self.assertEqual(self.instance.name, i_name)
- self.set_resource('instance', self.instance)
- self.assertEqual(self.instance.status, 'BUILD')
- instance_id = self.get_resource('instance').id
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
- instance = self.get_resource('instance')
+ instance = self.create_server(self.compute_client)
instance_id = instance.id
resize_flavor = self.config.compute.flavor_ref_alt
LOG.debug("Resizing instance %s from flavor %s to flavor %s",
@@ -78,18 +66,7 @@
@services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
- self.instance = self.compute_client.servers.create(
- i_name, base_image_id, flavor_id)
- self.assertEqual(self.instance.name, i_name)
- self.set_resource('instance', self.instance)
- self.assertEqual(self.instance.status, 'BUILD')
- instance_id = self.get_resource('instance').id
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
- instance = self.get_resource('instance')
+ instance = self.create_server(self.compute_client)
instance_id = instance.id
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, instance.status)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c5a4aaf..ab464e3 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -92,7 +92,7 @@
self.addCleanup(cleaner)
self._wait_for_volume_status(volume, 'available')
self._wait_for_volume_snapshot_status(snapshot, 'available')
- self.assertEquals(snapshot_name, snapshot.display_name)
+ self.assertEqual(snapshot_name, snapshot.display_name)
return snapshot
def _wait_for_volume_status(self, volume, status):
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index 66fb7af..a689c89 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -111,7 +111,10 @@
aws_secret = config.boto.aws_secret
purl = urlparse.urlparse(config.boto.ec2_url)
- region = boto.ec2.regioninfo.RegionInfo(name=config.identity.region,
+ region_name = config.compute.region
+ if not region_name:
+ region_name = config.identity.region
+ region = boto.ec2.regioninfo.RegionInfo(name=region_name,
endpoint=purl.hostname)
port = purl.port
if port is None:
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 10e64f8..369dd81 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -401,3 +401,200 @@
resp, body = self.post(uri, headers=self.headers, body=body)
body = json.loads(body)
return resp, body
+
+ def list_vips(self):
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
+ post_body = {
+ "vip": {
+ "protocol": protocol,
+ "name": name,
+ "subnet_id": subnet_id,
+ "pool_id": pool_id,
+ "protocol_port": protocol_port
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def create_pool(self, name, lb_method, protocol, subnet_id):
+ post_body = {
+ "pool": {
+ "protocol": protocol,
+ "name": name,
+ "subnet_id": subnet_id,
+ "lb_method": lb_method
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/pools' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def show_vip(self, uuid):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_vip(self, uuid):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def delete_pool(self, uuid):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def update_vip(self, vip_id, new_name):
+ put_body = {
+ "vip": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, vip_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_pool(self, pool_id, new_name):
+ put_body = {
+ "pool": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, pool_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def list_pools(self):
+ uri = '%s/lb/pools' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def show_pool(self, uuid):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, uuid)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def list_members(self):
+ uri = '%s/lb/members' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_member(self, address, protocol_port, pool_id):
+ post_body = {
+ "member": {
+ "protocol_port": protocol_port,
+ "pool_id": pool_id,
+ "address": address
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/members' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def show_member(self, uuid):
+ uri = '%s/lb/members/%s' % (self.uri_prefix, uuid)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_member(self, uuid):
+ uri = '%s/lb/members/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def update_member(self, admin_state_up, member_id):
+ put_body = {
+ "member": {
+ "admin_state_up": admin_state_up
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/members/%s' % (self.uri_prefix, member_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def list_health_monitors(self):
+ uri = '%s/lb/health_monitors' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_health_monitor(self, delay, max_retries, Type, timeout):
+ post_body = {
+ "health_monitor": {
+ "delay": delay,
+ "max_retries": max_retries,
+ "type": Type,
+ "timeout": timeout
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/health_monitors' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def show_health_monitor(self, uuid):
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_health_monitor(self, uuid):
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def update_health_monitor(self, admin_state_up, uuid):
+ put_body = {
+ "health_monitor": {
+ "admin_state_up": admin_state_up
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def associate_health_monitor_with_pool(self, health_monitor_id,
+ pool_id):
+ post_body = {
+ "health_monitor": {
+ "id": health_monitor_id,
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix,
+ pool_id)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def disassociate_health_monitor_with_pool(self, health_monitor_id,
+ pool_id):
+ uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
+ health_monitor_id)
+ resp, body = self.delete(uri, headers=self.headers)
+ return resp, body
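The new load-balancer calls keep the client's (resp, body) convention. A hedged sketch of chaining a few of them; the resource names and protocol values are placeholders, and the 'pool'/'vip' body keys are assumed from the Neutron LBaaS API::

    def exercise_lb_api(client, subnet_id):
        # 'client' is assumed to be a NetworkClientJSON instance and
        # 'subnet_id' an existing subnet; both come from the test fixture.
        resp, body = client.create_pool(name='pool-1',
                                        lb_method='ROUND_ROBIN',
                                        protocol='HTTP',
                                        subnet_id=subnet_id)
        pool_id = body['pool']['id']
        resp, body = client.create_vip(name='vip-1', protocol='HTTP',
                                       protocol_port=80,
                                       subnet_id=subnet_id,
                                       pool_id=pool_id)
        vip_id = body['vip']['id']
        client.delete_vip(vip_id)
        client.delete_pool(pool_id)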
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 8303bc0..a9b5512 100755
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -249,6 +249,185 @@
ports = {"ports": ports}
return resp, ports
+ def list_vips(self):
+ url = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ vips = self._parse_array(etree.fromstring(body))
+ vips = {"vips": vips}
+ return resp, vips
+
+ def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ post_body = Element("vip")
+ p1 = Element("name", name)
+ p2 = Element("protocol", protocol)
+ p3 = Element("protocol_port", protocol_port)
+ p4 = Element("subnet_id", subnet_id)
+ p5 = Element("pool_id", pool_id)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ post_body.append(p4)
+ post_body.append(p5)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_vip(self, vip_id):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ return self.delete(uri, self.headers)
+
+ def show_vip(self, vip_id):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_vip(self, vip_id, new_name):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ put_body = Element("vip")
+ p2 = Element("name", new_name)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_pools(self):
+ url = '%s/lb/pools' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ pools = self._parse_array(etree.fromstring(body))
+ pools = {"pools": pools}
+ return resp, pools
+
+ def create_pool(self, name, lb_method, protocol, subnet_id):
+ uri = '%s/lb/pools' % (self.uri_prefix)
+ post_body = Element("pool")
+ p1 = Element("lb_method", lb_method)
+ p2 = Element("protocol", protocol)
+ p3 = Element("subnet_id", subnet_id)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_pool(self, pool_id):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ return self.delete(uri, self.headers)
+
+ def show_pool(self, pool_id):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_pool(self, pool_id, new_name):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ put_body = Element("pool")
+ p2 = Element("name", new_name)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_members(self):
+ url = '%s/lb/members' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ members = self._parse_array(etree.fromstring(body))
+ members = {"members": members}
+ return resp, members
+
+ def create_member(self, address, protocol_port, pool_id):
+ uri = '%s/lb/members' % (self.uri_prefix)
+ post_body = Element("member")
+ p1 = Element("address", address)
+ p2 = Element("protocol_port", protocol_port)
+ p3 = Element("pool_id", pool_id)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_member(self, member_id):
+ uri = '%s/lb/members/%s' % (self.uri_prefix, str(member_id))
+ return self.delete(uri, self.headers)
+
+ def show_member(self, member_id):
+ uri = '%s/lb/members/%s' % (self.uri_prefix, str(member_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_member(self, admin_state_up, member_id):
+ uri = '%s/lb/members/%s' % (self.uri_prefix, str(member_id))
+ put_body = Element("member")
+ p2 = Element("admin_state_up", admin_state_up)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_health_monitors(self):
+ uri = '%s/lb/health_monitors' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = self._parse_array(etree.fromstring(body))
+ body = {"health_monitors": body}
+ return resp, body
+
+ def create_health_monitor(self, delay, max_retries, Type, timeout):
+ uri = '%s/lb/health_monitors' % (self.uri_prefix)
+ post_body = Element("health_monitor")
+ p1 = Element("delay", delay)
+ p2 = Element("max_retries", max_retries)
+ p3 = Element("type", Type)
+ p4 = Element("timeout", timeout)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ post_body.append(p4)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_health_monitor(self, uuid):
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, str(uuid))
+ return self.delete(uri, self.headers)
+
+ def show_health_monitor(self, uuid):
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_health_monitor(self, admin_state_up, uuid):
+ uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, str(uuid))
+ put_body = Element("health_monitor")
+ p2 = Element("admin_state_up", admin_state_up)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def associate_health_monitor_with_pool(self, health_monitor_id,
+ pool_id):
+ uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix,
+ pool_id)
+ post_body = Element("health_monitor")
+ p1 = Element("id", health_monitor_id,)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def disassociate_health_monitor_with_pool(self, health_monitor_id,
+ pool_id):
+ uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
+ health_monitor_id)
+ return self.delete(uri, self.headers)
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index dd5f3ec..75f7a33 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -35,7 +35,7 @@
metadata_prefix='X-Container-Meta-'):
"""
Creates a container, with optional metadata passed in as a
- dictonary
+ dictionary
"""
url = str(container_name)
headers = {}
@@ -92,9 +92,9 @@
"""
Returns complete list of all objects in the container, even if
item count is beyond 10,000 item listing limit.
- Does not require any paramaters aside from container name.
+ Does not require any parameters aside from container name.
"""
- # TODO(dwalleck): Rewite using json format to avoid newlines at end of
+ # TODO(dwalleck): Rewrite using json format to avoid newlines at end of
# obj names. Set limit to API limit - 1 (max returned items = 9999)
limit = 9999
if params is not None:
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 1c97869..c605a45 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -126,7 +126,7 @@
return resp, body
def get_object_using_temp_url(self, container, object_name, expires, key):
- """Retrieve object's data using temp URL."""
+ """Retrieve object's data using temporary URL."""
self._set_auth()
method = 'GET'
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 51c46da..3596017 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -81,7 +81,7 @@
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
- # NOTE(afazekas): it should use the volume namaspace
+ # NOTE(afazekas): it should use the volume namespace
snapshot = Element("snapshot", xmlns=XMLNS_11, volume_id=volume_id)
for key, value in kwargs.items():
snapshot.add_attr(key, value)
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index ecbfb19..9fa7a1e 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -60,6 +60,21 @@
"""Return the element 'attachment' from input volumes."""
return volume['attachments']['attachment']
+ def _check_if_bootable(self, volume):
+ """
+ Check if the volume is bootable, also change the value
+ of 'bootable' from string to boolean.
+ """
+ if volume['bootable'] == 'True':
+ volume['bootable'] = True
+ elif volume['bootable'] == 'False':
+ volume['bootable'] = False
+ else:
+ raise ValueError(
+ 'bootable flag is supposed to be either True or False, '
+ 'it is %s' % volume['bootable'])
+ return volume
+
def list_volumes(self, params=None):
"""List all the volumes created."""
url = 'volumes'
@@ -72,6 +87,8 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
+ for v in volumes:
+ v = self._check_if_bootable(v)
return resp, volumes
def list_volumes_with_detail(self, params=None):
@@ -86,14 +103,17 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
+ for v in volumes:
+ v = self._check_if_bootable(v)
return resp, volumes
def get_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url, self.headers)
- body = etree.fromstring(body)
- return resp, self._parse_volume(body)
+ body = self._parse_volume(etree.fromstring(body))
+ body = self._check_if_bootable(body)
+ return resp, body
def create_volume(self, size, **kwargs):
"""Creates a new Volume.
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 28251af..45a628d 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -42,7 +42,7 @@
def setUp(self, **kwargs):
"""This method is called before the run method
- to help the test initiatlize any structures.
+ to help the test initialize any structures.
kwargs contains arguments passed in from the
configuration json file.
@@ -59,7 +59,7 @@
def execute(self, shared_statistic):
"""This is the main execution entry point called
by the driver. We register a signal handler to
- allow us to gracefull tearDown, and then exit.
+ allow us to tearDown gracefully, and then exit.
We also keep track of how many runs we do.
"""
signal.signal(signal.SIGHUP, self._shutdown_handler)
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index aeea98d..1a5af00 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -60,7 +60,7 @@
subprocess.call(['git', 'init'])
exit_code = subprocess.call('sh pretty_tox.sh tests.passing',
shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEquals(exit_code, 0)
+ self.assertEqual(exit_code, 0)
@attr(type='smoke')
def test_pretty_tox_fails(self):
@@ -76,7 +76,7 @@
subprocess.call(['git', 'init'])
exit_code = subprocess.call('sh pretty_tox.sh', shell=True,
stdout=DEVNULL, stderr=DEVNULL)
- self.assertEquals(exit_code, 1)
+ self.assertEqual(exit_code, 1)
@attr(type='smoke')
def test_pretty_tox_serial(self):
@@ -88,7 +88,7 @@
os.chdir(self.directory)
exit_code = subprocess.call('sh pretty_tox_serial.sh tests.passing',
shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEquals(exit_code, 0)
+ self.assertEqual(exit_code, 0)
@attr(type='smoke')
def test_pretty_tox_serial_fails(self):
@@ -100,4 +100,4 @@
os.chdir(self.directory)
exit_code = subprocess.call('sh pretty_tox_serial.sh', shell=True,
stdout=DEVNULL, stderr=DEVNULL)
- self.assertEquals(exit_code, 1)
+ self.assertEqual(exit_code, 1)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index a848fc9..7fab364 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -176,25 +176,25 @@
instance.add_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
- self.assertEquals(tags[0].name, 'key1')
- self.assertEquals(tags[0].value, 'value1')
+ self.assertEqual(tags[0].name, 'key1')
+ self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
- self.assertEquals(tags[0].name, 'key1')
- self.assertEquals(tags[0].value, 'value1')
+ self.assertEqual(tags[0].name, 'key1')
+ self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
- self.assertEquals(tags[0].name, 'key1')
- self.assertEquals(tags[0].value, 'value1')
+ self.assertEqual(tags[0].name, 'key1')
+ self.assertEqual(tags[0].value, 'value1')
tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
- self.assertEquals(len(tags), 0)
+ self.assertEqual(len(tags), 0)
for instance in reservation.instances:
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
- self.assertEquals(len(tags), 0)
+ self.assertEqual(len(tags), 0)
for instance in reservation.instances:
instance.stop()
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
index 1507deb..a44e283 100644
--- a/tempest/thirdparty/boto/utils/wait.py
+++ b/tempest/thirdparty/boto/utils/wait.py
@@ -54,8 +54,7 @@
raise TestCase.failureException("State change timeout exceeded!"
'(%ds) While waiting'
'for %s at "%s"' %
- (dtime,
- final_set, status))
+ (dtime, final_set, status))
time.sleep(default_check_interval)
old_status = status
status = lfunction()
@@ -78,8 +77,7 @@
raise TestCase.failureException('Pattern find timeout exceeded!'
'(%ds) While waiting for'
'"%s" pattern in "%s"' %
- (dtime,
- regexp, text))
+ (dtime, regexp, text))
time.sleep(default_check_interval)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index f428c1e..0999e2c 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -114,9 +114,10 @@
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
- # setuptools.
- self.pip_install('pip>=1.3')
+ # setuptools and pbr
+ self.pip_install('pip>=1.4')
self.pip_install('setuptools')
+ self.pip_install('pbr')
self.pip_install('-r', self.requirements)
self.pip_install('-r', self.test_requirements)
@@ -201,12 +202,13 @@
RHEL: https://bugzilla.redhat.com/958868
"""
- # Install "patch" program if it's not there
- if not self.check_pkg('patch'):
- self.die("Please install 'patch'.")
+ if os.path.exists('contrib/redhat-eventlet.patch'):
+ # Install "patch" program if it's not there
+ if not self.check_pkg('patch'):
+ self.die("Please install 'patch'.")
- # Apply the eventlet patch
- self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
- 'site-packages',
- 'eventlet/green/subprocess.py'),
- 'contrib/redhat-eventlet.patch')
+ # Apply the eventlet patch
+ self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
+ 'site-packages',
+ 'eventlet/green/subprocess.py'),
+ 'contrib/redhat-eventlet.patch')