Merge "Add test for swift ACLs"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f012097..00c4e9a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -33,6 +33,13 @@
field_guide/thirdparty
field_guide/whitebox
+------------------
+API and test cases
+------------------
+.. toctree::
+ :maxdepth: 1
+
+ api/modules
==================
Indices and tables
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 825965f..12a57db 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,5 +1,13 @@
[DEFAULT]
# log_config = /opt/stack/tempest/etc/logging.conf.sample
+
+# disable logging to stderr
+use_stderr = False
+
+# log file
+log_file = tempest.log
+
+# lock/semaphore base directory
lock_path=/tmp
[identity]
@@ -90,7 +98,7 @@
fixed_network_name = private
# Network id used for SSH (public, private, etc)
-network_for_ssh = private
+network_for_ssh = public
# IP version of the address used for SSH
ip_version_for_ssh = 4
@@ -259,6 +267,8 @@
# Number of seconds to wait while looping to check the status of a
# container to container synchronization
container_sync_interval = 5
+# Set to True if the Account Quota middleware is enabled
+accounts_quotas_available = True
[boto]
# This section contains configuration options used when executing tests
@@ -357,6 +367,8 @@
enabled = True
# directory where python client binaries are located
cli_dir = /usr/local/bin
+# Number of seconds to wait on a CLI timeout
+timeout = 15
[service_available]
# Whether or not cinder is expected to be available
diff --git a/setup.cfg b/setup.cfg
index 3b13b60..7cfc4ce 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,3 +35,6 @@
# coverage http://pypi.python.org/pypi/coverage
# openstack-nose https://github.com/openstack-dev/openstack-nose
verbosity=2
+
+[pbr]
+autodoc_tree_index_modules=true
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 107d635..303bc0c 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -146,6 +146,7 @@
self.client.get_aggregate, -1)
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -167,6 +168,7 @@
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -184,6 +186,7 @@
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -245,6 +248,7 @@
aggregate['id'], self.host)
@attr(type=['negative', 'gate'])
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
aggregate_name = rand_name(self.aggregate_name_prefix)
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 6db20f9..6d0a5b5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -60,7 +60,7 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -87,7 +87,7 @@
if self._interface == "json":
self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
@@ -99,7 +99,7 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -109,7 +109,7 @@
rxtx=self.rxtx)
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -153,12 +153,12 @@
@attr(type='gate')
def test_create_list_flavor_without_extra_data(self):
- #Create a flavor and ensure it is listed
- #This operation requires the user to have 'admin' role
+ # Create a flavor and ensure it is listed
+ # This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -181,11 +181,11 @@
if self._interface == "json":
self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
- #Check if flavor is present in list
+ # Check if flavor is present in list
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -195,13 +195,13 @@
@attr(type='gate')
def test_flavor_not_public_verify_entry_not_in_list_details(self):
- #Create a flavor with os-flavor-access:is_public false should not
- #be present in list_details.
- #This operation requires the user to have 'admin' role
+ # A flavor created with os-flavor-access:is_public set to False should
+ # not be present in list_details.
+ # This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -209,7 +209,7 @@
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -219,12 +219,12 @@
@attr(type='gate')
def test_list_public_flavor_with_other_user(self):
- #Create a Flavor with public access.
- #Try to List/Get flavor with another user
+ # Create a Flavor with public access.
+ # Try to List/Get flavor with another user
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -233,7 +233,7 @@
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
self.new_client = self.flavors_client
- #Verify flavor is retrieved with new user
+ # Verify flavor is retrieved with new user
resp, flavors = self.new_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 63d5025..107b23d 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -52,7 +52,7 @@
@attr(type='gate')
def test_flavor_access_add_remove(self):
- #Test to add and remove flavor access to a given tenant.
+ # Test to add and remove flavor access to a given tenant.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -61,7 +61,7 @@
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
- #Add flavor access to a tenant.
+ # Add flavor access to a tenant.
resp_body = {
"tenant_id": str(self.tenant_id),
"flavor_id": str(new_flavor['id']),
@@ -71,25 +71,25 @@
self.assertEqual(add_resp.status, 200)
self.assertIn(resp_body, add_body)
- #The flavor is present in list.
+ # The flavor is present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
- #Remove flavor access from a tenant.
+ # Remove flavor access from a tenant.
remove_resp, remove_body = \
self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(remove_resp.status, 200)
self.assertNotIn(resp_body, remove_body)
- #The flavor is not present in list.
+ # The flavor is not present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_add(self):
- #Test to add flavor access as a user without admin privileges.
+ # Test to add flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -105,7 +105,7 @@
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_remove(self):
- #Test to remove flavor access as a user without admin privileges.
+ # Test to remove flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -114,7 +114,7 @@
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
- #Add flavor access to a tenant.
+ # Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index db376b5..7b79a12 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -47,7 +47,7 @@
cls.new_flavor_id = 12345
swap = 1024
rxtx = 1
- #Create a flavor so as to set/get/unset extra specs
+ # Create a flavor so as to set/get/unset extra specs
resp, cls.flavor = cls.client.create_flavor(flavor_name,
ram, vcpus,
disk,
@@ -62,28 +62,28 @@
@attr(type='gate')
def test_flavor_set_get_unset_keys(self):
- #Test to SET, GET UNSET flavor extra spec as a user
- #with admin privileges.
- #Assigning extra specs values that are to be set
+ # Test to SET, GET and UNSET flavor extra spec as a user
+ # with admin privileges.
+ # Assigning extra specs values that are to be set
specs = {"key1": "value1", "key2": "value2"}
- #SET extra specs to the flavor created in setUp
+ # SET extra specs to the flavor created in setUp
set_resp, set_body = \
self.client.set_flavor_extra_spec(self.flavor['id'], specs)
self.assertEqual(set_resp.status, 200)
self.assertEqual(set_body, specs)
- #GET extra specs and verify
+ # GET extra specs and verify
get_resp, get_body = \
self.client.get_flavor_extra_spec(self.flavor['id'])
self.assertEqual(get_resp.status, 200)
self.assertEqual(get_body, specs)
- #UNSET extra specs that were set in this test
+ # UNSET extra specs that were set in this test
unset_resp, _ = \
self.client.unset_flavor_extra_spec(self.flavor['id'], "key1")
self.assertEqual(unset_resp.status, 200)
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_set_keys(self):
- #Test to SET flavor extra spec as a user without admin privileges.
+ # Test to SET flavor extra spec as a user without admin privileges.
specs = {"key1": "value1", "key2": "value2"}
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.set_flavor_extra_spec,
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 3e98029..d8d162e 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -36,7 +36,7 @@
resp, tenants = cls.identity_admin_client.list_tenants()
- #NOTE(afazekas): these test cases should always create and use a new
+ # NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
if cls.config.compute.allow_tenant_isolation:
cls.demo_tenant_id = cls.isolated_creds[0][0]['tenantId']
@@ -102,7 +102,7 @@
self.assertEqual(200, resp.status, "Failed to reset quota "
"defaults")
- #TODO(afazekas): merge these test cases
+ # TODO(afazekas): merge these test cases
@attr(type='gate')
def test_get_updated_quotas(self):
# Verify that GET shows the updated quota set
@@ -121,7 +121,7 @@
self.assertEqual(200, resp.status)
self.assertEqual(quota_set['ram'], 5120)
- #TODO(afazekas): Add dedicated tenant to the skiped quota tests
+ # TODO(afazekas): Add dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type='gate')
@@ -155,12 +155,12 @@
ram=default_mem_quota)
self.assertRaises(exceptions.OverLimit, self.create_server)
-#TODO(afazekas): Add test that tried to update the quota_set as a regular user
+# TODO(afazekas): Add a test that tries to update the quota_set as a regular user
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
- #Once instances quota limit is reached, disallow server creation
+ # Once instances quota limit is reached, disallow server creation
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
default_instances_quota = quota_set['instances']
instances_quota = 0 # Set quota to zero to disallow server creation
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index dc960db..434ea2f 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -70,13 +70,13 @@
s1 = map(lambda x: x['binary'], services)
s2 = map(lambda x: x['binary'], services_on_host)
- #sort the lists before comparing, to take out dependency
- #on order.
+ # sort the lists before comparing, to take out dependency
+ # on order.
self.assertEqual(sorted(s1), sorted(s2))
@attr(type=['negative', 'gate'])
def test_get_service_by_invalid_params(self):
- #return all services if send the request with invalid parameter
+ # return all services if send the request with invalid parameter
resp, services = self.client.list_services()
params = {'xxx': 'nova-compute'}
resp, services_xxx = self.client.list_services(params)
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index a8ac7de..930ebcb 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -32,15 +32,15 @@
cls.client = cls.floating_ips_client
cls.servers_client = cls.servers_client
- #Server creation
+ # Server creation
resp, server = cls.create_server(wait_until='ACTIVE')
cls.server_id = server['id']
resp, body = cls.servers_client.get_server(server['id'])
- #Floating IP creation
+ # Floating IP creation
resp, body = cls.client.create_floating_ip()
cls.floating_ip_id = body['id']
cls.floating_ip = body['ip']
- #Generating a nonexistent floatingIP id
+ # Generating a nonexistent floatingIP id
cls.floating_ip_ids = []
resp, body = cls.client.list_floating_ips()
for i in range(len(body)):
@@ -52,7 +52,7 @@
@classmethod
def tearDownClass(cls):
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).tearDownClass()
@@ -66,17 +66,17 @@
floating_ip_id_allocated = body['id']
resp, floating_ip_details = \
self.client.get_floating_ip_details(floating_ip_id_allocated)
- #Checking if the details of allocated IP is in list of floating IP
+ # Checking if the details of allocated IP is in list of floating IP
resp, body = self.client.list_floating_ips()
self.assertIn(floating_ip_details, body)
finally:
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
self.client.delete_floating_ip(floating_ip_id_allocated)
@attr(type=['negative', 'gate'])
def test_allocate_floating_ip_from_nonexistent_pool(self):
# Positive test:Allocation of a new floating IP from a nonexistent_pool
- #to a project should fail
+ # to a project should fail
self.assertRaises(exceptions.NotFound,
self.client.create_floating_ip,
"non_exist_pool")
@@ -85,12 +85,12 @@
def test_delete_floating_ip(self):
# Positive test:Deletion of valid floating IP from project
# should be successful
- #Creating the floating IP that is to be deleted in this method
+ # Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
- #Storing the details of floating IP before deleting it
+ # Storing the details of floating IP before deleting it
cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
resp, floating_ip_details = cli_resp
- #Deleting the floating IP from the project
+ # Deleting the floating IP from the project
resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
self.assertEqual(202, resp.status)
# Check it was really deleted.
@@ -101,12 +101,12 @@
# Positive test:Associate and disassociate the provided floating IP
# to a specific server should be successful
- #Association of floating IP to fixed IP address
+ # Association of floating IP to fixed IP address
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
- #Disassociation of floating IP that was associated in this method
+ # Disassociation of floating IP that was associated in this method
resp, body = self.client.disassociate_floating_ip_from_server(
self.floating_ip,
self.server_id)
@@ -142,18 +142,18 @@
def test_associate_already_associated_floating_ip(self):
# positive test:Association of an already associated floating IP
# to specific server should change the association of the Floating IP
- #Create server so as to use for Multiple association
+ # Create server so as to use for Multiple association
resp, body = self.servers_client.create_server('floating-server2',
self.image_ref,
self.flavor_ref)
self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
self.new_server_id = body['id']
- #Associating floating IP for the first time
+ # Associating floating IP for the first time
resp, _ = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
- #Associating floating IP for the second time
+ # Associating floating IP for the second time
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.new_server_id)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 7e4e833..e380334 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -56,7 +56,7 @@
@attr(type='gate')
def test_get_floating_ip_details(self):
# Positive test:Should be able to GET the details of floatingIP
- #Creating a floating IP for which details are to be checked
+ # Creating a floating IP for which details are to be checked
try:
resp, body = self.client.create_floating_ip()
floating_ip_instance_id = body['instance_id']
@@ -66,14 +66,14 @@
resp, body = \
self.client.get_floating_ip_details(floating_ip_id)
self.assertEqual(200, resp.status)
- #Comparing the details of floating IP
+ # Comparing the details of floating IP
self.assertEqual(floating_ip_instance_id,
body['instance_id'])
self.assertEqual(floating_ip_ip, body['ip'])
self.assertEqual(floating_ip_fixed_ip,
body['fixed_ip'])
self.assertEqual(floating_ip_id, body['id'])
- #Deleting the floating IP created in this method
+ # Deleting the floating IP created in this method
finally:
self.client.delete_floating_ip(floating_ip_id)
@@ -85,7 +85,7 @@
resp, body = self.client.list_floating_ips()
for i in range(len(body)):
floating_ip_id.append(body[i]['id'])
- #Creating a nonexistant floatingIP id
+ # Creating a nonexistent floatingIP id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in floating_ip_id:
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 64f1854..14eced2 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -89,23 +89,6 @@
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server['id'], snapshot_name, meta)
- @testtools.skipUnless(compute.MULTI_USER,
- 'Need multiple users for this test.')
- @attr(type=['negative', 'gate'])
- def test_delete_image_of_another_tenant(self):
- # Return an error while trying to delete another tenant's image
- self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
- snapshot_name = rand_name('test-snap-')
- resp, body = self.client.create_image(self.server['id'], snapshot_name)
- image_id = parse_image_id(resp['location'])
- self.image_ids.append(image_id)
- self.client.wait_for_image_resp_code(image_id, 200)
- self.client.wait_for_image_status(image_id, 'ACTIVE')
-
- # Delete image
- self.assertRaises(exceptions.NotFound,
- self.alt_client.delete_image, image_id)
-
def _get_default_flavor_disk_size(self, flavor_id):
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
return flavor['disk']
@@ -144,16 +127,6 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_resource_deletion(image_id)
- @testtools.skipUnless(compute.MULTI_USER,
- 'Need multiple users for this test.')
- @attr(type=['negative', 'gate'])
- def test_create_image_for_server_in_another_tenant(self):
- # Creating image of another tenant's server should be return error
-
- snapshot_name = rand_name('test-snap-')
- self.assertRaises(exceptions.NotFound, self.alt_client.create_image,
- self.server['id'], snapshot_name)
-
@attr(type=['negative', 'gate'])
def test_create_second_image_when_first_image_is_being_saved(self):
# Disallow creating another image when first image is being saved
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index fb2906a..a80f456 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -144,16 +144,16 @@
# Verify only the expected number of results are returned
params = {'limit': '1'}
resp, images = self.client.list_images(params)
- #when _interface='xml', one element for images_links in images
- #ref: Question #224349
+ # when _interface='xml', one element for images_links in images
+ # ref: Question #224349
self.assertEqual(1, len([x for x in images if 'id' in x]))
@attr(type='gate')
def test_list_images_filter_by_changes_since(self):
# Verify only updated images are returned in the detailed list
- #Becoming ACTIVE will modify the updated time
- #Filter by the image's created time
+ # Becoming ACTIVE will modify the updated time
+ # Filter by the image's created time
params = {'changes-since': self.image3['created']}
resp, images = self.client.list_images(params)
found = any([i for i in images if i['id'] == self.image3_id])
@@ -222,8 +222,8 @@
def test_list_images_with_detail_filter_by_changes_since(self):
# Verify an update image is returned
- #Becoming ACTIVE will modify the updated time
- #Filter by the image's created time
+ # Becoming ACTIVE will modify the updated time
+ # Filter by the image's created time
params = {'changes-since': self.image1['created']}
resp, images = self.client.list_images_with_detail(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 3bcf7b4..e4e87c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -32,33 +32,33 @@
@attr(type='gate')
def test_keypairs_create_list_delete(self):
# Keypairs created should be available in the response list
- #Create 3 keypairs
+ # Create 3 keypairs
key_list = list()
for i in range(3):
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
- #Need to pop these keys so that our compare doesn't fail later,
- #as the keypair dicts from list API doesn't have them.
+ # Need to pop these keys so that our compare doesn't fail later,
+ # as the keypair dicts from the list API don't have them.
keypair.pop('private_key')
keypair.pop('user_id')
self.assertEqual(200, resp.status)
key_list.append(keypair)
- #Fetch all keypairs and verify the list
- #has all created keypairs
+ # Fetch all keypairs and verify the list
+ # has all created keypairs
resp, fetched_list = self.client.list_keypairs()
self.assertEqual(200, resp.status)
- #We need to remove the extra 'keypair' element in the
- #returned dict. See comment in keypairs_client.list_keypairs()
+ # We need to remove the extra 'keypair' element in the
+ # returned dict. See comment in keypairs_client.list_keypairs()
new_list = list()
for keypair in fetched_list:
new_list.append(keypair['keypair'])
fetched_list = new_list
- #Now check if all the created keypairs are in the fetched list
+ # Now check if all the created keypairs are in the fetched list
missing_kps = [kp for kp in key_list if kp not in fetched_list]
self.assertFalse(missing_kps,
"Failed to find keypairs %s in fetched list"
% ', '.join(m_key['name'] for m_key in missing_kps))
- #Delete all the keypairs created
+ # Delete all the keypairs created
for keypair in key_list:
resp, _ = self.client.delete_keypair(keypair['name'])
self.assertEqual(202, resp.status)
@@ -163,7 +163,7 @@
k_name = rand_name('keypair-')
resp, _ = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
- #Now try the same keyname to ceate another key
+ # Now try the same keyname to create another key
self.assertRaises(exceptions.Duplicate, self.client.create_keypair,
k_name)
resp, _ = self.client.delete_keypair(k_name)
diff --git a/tempest/api/compute/limits/test_absolute_limits.py b/tempest/api/compute/limits/test_absolute_limits.py
index beae122..972e4a8 100644
--- a/tempest/api/compute/limits/test_absolute_limits.py
+++ b/tempest/api/compute/limits/test_absolute_limits.py
@@ -51,11 +51,11 @@
@attr(type=['negative', 'gate'])
def test_max_image_meta_exceed_limit(self):
- #We should not create vm with image meta over maxImageMeta limit
+ # We should not create vm with image meta over maxImageMeta limit
# Get max limit value
max_meta = self.client.get_specific_absolute_limit('maxImageMeta')
- #Create server should fail, since we are passing > metadata Limit!
+ # Create server should fail, since we are passing > metadata Limit!
max_meta_data = int(max_meta) + 1
meta_data = {}
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 6a32b64..472b8b4 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -15,8 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest import exceptions
from tempest.test import attr
@@ -33,14 +36,14 @@
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
# should be successfull
- #Creating a Security Group to add rules to it
+ # Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
securitygroup_id = securitygroup['id']
self.addCleanup(self.client.delete_security_group, securitygroup_id)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
ip_protocol = 'tcp'
from_port = 22
to_port = 22
@@ -60,21 +63,21 @@
secgroup1 = None
secgroup2 = None
- #Creating a Security Group to add rules to it
+ # Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
secgroup1 = securitygroup['id']
self.addCleanup(self.client.delete_security_group, secgroup1)
- #Creating a Security Group so as to assign group_id to the rule
+ # Creating a Security Group so as to assign group_id to the rule
s_name2 = rand_name('securitygroup-')
s_description2 = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name2, s_description2)
secgroup2 = securitygroup['id']
self.addCleanup(self.client.delete_security_group, secgroup2)
- #Adding rules to the created Security Group with optional arguments
+ # Adding rules to the created Security Group with optional arguments
parent_group_id = secgroup1
ip_protocol = 'tcp'
from_port = 22
@@ -91,6 +94,8 @@
self.addCleanup(self.client.delete_security_group_rule, rule['id'])
self.assertEqual(200, resp.status)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_rules_create_with_invalid_id(self):
# Negative test: Creation of Security Group rule should FAIL
@@ -108,12 +113,12 @@
def test_security_group_rules_create_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = rand_name('999')
from_port = 22
@@ -128,12 +133,12 @@
def test_security_group_rules_create_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = rand_name('999')
@@ -147,12 +152,12 @@
def test_security_group_rules_create_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = 22
@@ -181,6 +186,8 @@
self.client.create_security_group_rule,
secgroup_id, ip_protocol, from_port, to_port)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_rules_delete_with_invalid_id(self):
# Negative test: Deletion of Security Group rule should be FAIL
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 68be206..697a839 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -39,7 +39,7 @@
@attr(type='gate')
def test_security_groups_create_list_delete(self):
# Positive test:Should return the list of Security Groups
- #Create 3 Security Groups
+ # Create 3 Security Groups
security_group_list = list()
for i in range(3):
s_name = rand_name('securitygroup-')
@@ -50,11 +50,11 @@
self.addCleanup(self._delete_security_group,
securitygroup['id'])
security_group_list.append(securitygroup)
- #Fetch all Security Groups and verify the list
- #has all created Security Groups
+ # Fetch all Security Groups and verify the list
+ # has all created Security Groups
resp, fetched_list = self.client.list_security_groups()
self.assertEqual(200, resp.status)
- #Now check if all the created Security Groups are in fetched list
+ # Now check if all the created Security Groups are in fetched list
missing_sgs = \
[sg for sg in security_group_list if sg not in fetched_list]
self.assertFalse(missing_sgs,
@@ -62,8 +62,8 @@
"list" % ', '.join(m_group['name']
for m_group in missing_sgs))
- #TODO(afazekas): scheduled for delete,
- #test_security_group_create_get_delete covers it
+ # TODO(afazekas): scheduled for delete,
+ # test_security_group_create_get_delete covers it
@attr(type='gate')
def test_security_group_create_delete(self):
# Security Group should be created, verified and deleted
@@ -99,7 +99,7 @@
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
"not equal to the requested name")
- #Now fetch the created Security Group by its 'id'
+ # Now fetch the created Security Group by its 'id'
resp, fetched_group = \
self.client.get_security_group(securitygroup['id'])
self.assertEqual(200, resp.status)
@@ -115,7 +115,7 @@
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- #Creating a nonexistant Security Group id
+ # Creating a nonexistent Security Group id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
@@ -186,7 +186,7 @@
if body[i]['name'] == 'default':
default_security_group_id = body[i]['id']
break
- #Deleting the "default" Security Group
+ # Deleting the "default" Security Group
self.assertRaises(exceptions.BadRequest,
self.client.delete_security_group,
default_security_group_id)
@@ -198,7 +198,7 @@
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- #Creating Non Existant Security Group
+ # Creating a nonexistent Security Group id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index e9385b5..efb01af 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -39,7 +39,7 @@
resp, server = self.create_server(disk_config='AUTO',
wait_until='ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
@@ -47,14 +47,14 @@
self.image_ref_alt,
disk_config='MANUAL')
- #Wait for the server to become active
+ # Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@attr(type='gate')
@@ -63,7 +63,7 @@
resp, server = self.create_server(disk_config='MANUAL',
wait_until='ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
@@ -71,14 +71,14 @@
self.image_ref_alt,
disk_config='AUTO')
- #Wait for the server to become active
+ # Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -88,7 +88,7 @@
resp, server = self.create_server(disk_config='MANUAL',
wait_until='ACTIVE')
- #Resize with auto option
+ # Resize with auto option
self.client.resize(server['id'], self.flavor_ref_alt,
disk_config='AUTO')
self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -98,7 +98,7 @@
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -108,7 +108,7 @@
resp, server = self.create_server(disk_config='AUTO',
wait_until='ACTIVE')
- #Resize with manual option
+ # Resize with manual option
self.client.resize(server['id'], self.flavor_ref_alt,
disk_config='MANUAL')
self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -118,7 +118,7 @@
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 31b44f7..b8f965c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -133,7 +133,7 @@
# Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers(params)
- #when _interface='xml', one element for servers_links in servers
+ # when _interface='xml', one element for servers_links in servers
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@utils.skip_unless_attr('multiple_images', 'Only one image found')
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index c03c43e..bad4a11 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -95,7 +95,7 @@
ignore_error=True)
cls.deleted_fixtures.append(srv)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_with_a_deleted_server(self):
# Verify deleted servers do not show by default in list servers
# List servers and verify server not returned
@@ -107,7 +107,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], actual)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_image(self):
# Listing servers for a non existing image returns empty list
non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
@@ -116,7 +116,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_flavor(self):
# Listing servers by non existing flavor returns empty list
non_existing_flavor = 1234
@@ -125,7 +125,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_server_name(self):
# Listing servers for a non existent server name returns empty list
non_existing_name = 'junk_server_1234'
@@ -134,7 +134,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_status_non_existing(self):
# Return an empty list when invalid status is specified
non_existing_status = 'BALONEY'
@@ -148,23 +148,23 @@
# List servers by specifying limits
resp, body = self.client.list_servers({'limit': 1})
self.assertEqual('200', resp['status'])
- #when _interface='xml', one element for servers_links in servers
+ # when _interface='xml', one element for servers_links in servers
self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_greater_than_actual_count(self):
# List servers by specifying a greater value for limit
resp, body = self.client.list_servers({'limit': 100})
self.assertEqual('200', resp['status'])
self.assertEqual(len(self.existing_fixtures), len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_string(self):
# Return an error if a string value is passed for limit
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': 'testing'})
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_negative_value(self):
# Return an error if a negative value for limit is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
@@ -182,13 +182,13 @@
len(self.deleted_fixtures))
self.assertEqual(num_expected, len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_invalid_date(self):
# Return an error when invalid date format is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'changes-since': '2011/01/01'})
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_future_date(self):
# Return an empty list when a date in the future is passed
changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
@@ -196,7 +196,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual(0, len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_detail_server_is_deleted(self):
# Server details are not listed for a deleted server
deleted_ids = [s['id'] for s in self.deleted_fixtures]
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 8b76f7f..893d9e0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -35,7 +35,7 @@
run_ssh = tempest.config.TempestConfig().compute.run_ssh
def setUp(self):
- #NOTE(afazekas): Normally we use the same server with all test cases,
+ # NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
@@ -121,13 +121,13 @@
personality=personality,
adminPass=password)
- #Verify the properties in the initial response are correct
+ # Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(self.flavor_ref, int(rebuilt_server['flavor']['id']))
- #Verify the server properties after the rebuild completes
+ # Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
resp, server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
@@ -279,6 +279,15 @@
cls.server_id = server['id']
cls.password = server['adminPass']
+ @attr(type='gate')
+ def test_stop_start_server(self):
+ resp, server = self.servers_client.stop(self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ resp, server = self.servers_client.start(self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
class ServerActionsTestXML(ServerActionsTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 442d30c..45de0d6 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -47,7 +47,7 @@
# All metadata key/value pairs for a server should be returned
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
- #Verify the expected metadata items are in the list
+ # Verify the expected metadata items are in the list
self.assertEqual(200, resp.status)
expected = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@@ -55,14 +55,14 @@
@attr(type='gate')
def test_set_server_metadata(self):
# The server's metadata should be replaced with the provided values
- #Create a new set of metadata for the server
+ # Create a new set of metadata for the server
req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
resp, metadata = self.client.set_server_metadata(self.server_id,
req_metadata)
self.assertEqual(200, resp.status)
- #Verify the expected values are correct, and that the
- #previous values have been removed
+ # Verify the expected values are correct, and that the
+ # previous values have been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
self.assertEqual(resp_metadata, req_metadata)
@@ -98,7 +98,7 @@
meta)
self.assertEqual(200, resp.status)
- #Verify the values have been updated to the proper values
+ # Verify the values have been updated to the proper values
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'alt1', 'key2': 'value2', 'key3': 'value3'}
self.assertEqual(expected, resp_metadata)
@@ -123,13 +123,13 @@
@attr(type='gate')
def test_set_server_metadata_item(self):
# The item's value should be updated to the provided value
- #Update the metadata value
+ # Update the metadata value
meta = {'nova': 'alt'}
resp, body = self.client.set_server_metadata_item(self.server_id,
'nova', meta)
self.assertEqual(200, resp.status)
- #Verify the meta item's value has been updated
+ # Verify the meta item's value has been updated
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'value1', 'key2': 'value2', 'nova': 'alt'}
self.assertEqual(expected, resp_metadata)
@@ -141,7 +141,7 @@
'key1')
self.assertEqual(204, resp.status)
- #Verify the metadata item has been removed
+ # Verify the metadata item has been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@@ -197,7 +197,7 @@
# Negative test: Should not be able to delete metadata item from a
# nonexistant server
- #Delete the metadata item
+ # Delete the metadata item
self.assertRaises(exceptions.NotFound,
self.client.delete_server_metadata_item, 999, 'd')
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 13c2f74..82559d5 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -32,12 +32,12 @@
super(ServerRescueTestJSON, cls).setUpClass()
cls.device = 'vdf'
- #Floating IP creation
+ # Floating IP creation
resp, body = cls.floating_ips_client.create_floating_ip()
cls.floating_ip_id = str(body['id']).strip()
cls.floating_ip = str(body['ip']).strip()
- #Security group creation
+ # Security group creation
cls.sg_name = rand_name('sg')
cls.sg_desc = rand_name('sg-desc')
resp, cls.sg = \
@@ -85,7 +85,7 @@
@classmethod
def tearDownClass(cls):
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
client = cls.volumes_extensions_client
client.delete_volume(str(cls.volume_to_attach['id']).strip())
@@ -110,6 +110,11 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ def _unpause(self, server_id):
+ resp, body = self.servers_client.unpause_server(server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
@attr(type='smoke')
def test_rescue_unrescue_instance(self):
resp, body = self.servers_client.rescue_server(
@@ -121,6 +126,18 @@
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
@attr(type=['negative', 'gate'])
+ def test_rescue_paused_instance(self):
+ # Rescuing a paused server should fail
+ resp, body = self.servers_client.pause_server(
+ self.server_id)
+ self.addCleanup(self._unpause, self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'PAUSED')
+ self.assertRaises(exceptions.Duplicate,
+ self.servers_client.rescue_server,
+ self.server_id)
+
+ @attr(type=['negative', 'gate'])
def test_rescued_vm_reboot(self):
self.assertRaises(exceptions.Duplicate, self.servers_client.reboot,
self.rescue_id, 'HARD')
@@ -165,7 +182,7 @@
# Rescue the server
self.servers_client.rescue_server(self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
- #addCleanup is a LIFO queue
+ # addCleanup is a LIFO queue
self.addCleanup(self._detach, self.server_id,
self.volume_to_detach['id'])
self.addCleanup(self._unrescue, self.server_id)
@@ -184,13 +201,13 @@
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
self.addCleanup(self._unrescue, self.server_id)
- #Association of floating IP to a rescued vm
+ # Association of floating IP to a rescued vm
client = self.floating_ips_client
resp, body = client.associate_floating_ip_to_server(self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
- #Disassociation of floating IP that was associated in this method
+ # Disassociation of floating IP that was associated in this method
resp, body = \
client.disassociate_floating_ip_from_server(self.floating_ip,
self.server_id)
@@ -203,12 +220,12 @@
self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
- #Add Security group
+ # Add Security group
resp, body = self.servers_client.add_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
- #Delete Security group
+ # Delete Security group
resp, body = self.servers_client.remove_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 1a65a20..55dba97 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -126,7 +126,7 @@
def test_list_servers_with_alternate_tenant(self):
# A list on servers from one tenant should not
# show on alternate tenant
- #Listing servers from alternate tenant
+ # Listing servers from alternate tenant
alt_server_ids = []
resp, body = self.alt_client.list_servers()
alt_server_ids = [s['id'] for s in body['servers']]
@@ -188,7 +188,7 @@
def test_create_keypair_in_analt_user_tenant(self):
# A create keypair request should fail if the tenant id does not match
# the current user
- #POST keypair with other user tenant
+ # POST keypair with other user tenant
k_name = rand_name('keypair-')
self.alt_keypairs_client._set_auth()
self.saved_base_url = self.alt_keypairs_client.base_url
@@ -238,7 +238,7 @@
def test_create_security_group_in_analt_user_tenant(self):
# A create security group request should fail if the tenant id does not
# match the current user
- #POST security group with other user tenant
+ # POST security group with other user tenant
s_name = rand_name('security-')
s_description = rand_name('security')
self.saved_base_url = self.alt_security_client.base_url
@@ -276,7 +276,7 @@
def test_create_security_group_rule_in_analt_user_tenant(self):
# A create security group rule request should fail if the tenant id
# does not match the current user
- #POST security group rule with other user tenant
+ # POST security group rule with other user tenant
parent_group_id = self.security_group['id']
ip_protocol = 'icmp'
from_port = -1
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index f2dd93c..4f0f17e 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -38,7 +38,7 @@
volume = None
v_name = rand_name('Volume-%s-') % self._interface
metadata = {'Type': 'work'}
- #Create volume
+ # Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata)
@@ -51,12 +51,12 @@
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
- #Wait for Volume status to become ACTIVE
+ # Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
+ # GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
- #Verfication of details of fetched Volume
+ # Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
@@ -74,7 +74,7 @@
def test_volume_get_metadata_none(self):
# CREATE, GET empty metadata dict
v_name = rand_name('Volume-')
- #Create volume
+ # Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata={})
@@ -82,19 +82,19 @@
self.assertEqual(200, resp.status)
self.assertIn('id', volume)
self.assertIn('displayName', volume)
- #Wait for Volume status to become ACTIVE
+ # Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
+ # GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(fetched_volume['metadata'], {})
def _delete_volume(self, volume):
- #Delete the Volume created in this method
+ # Delete the Volume created in this method
try:
resp, _ = self.client.delete_volume(volume['id'])
self.assertEqual(202, resp.status)
- #Checking if the deleted Volume still exists
+ # Checking if the deleted Volume still exists
self.client.wait_for_resource_deletion(volume['id'])
except KeyError:
return
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 02cc4e1..0e475cf 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -97,10 +97,10 @@
@attr(type='gate')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
- #Fetch all Volumes
+ # Fetch all Volumes
resp, fetched_list = self.client.list_volumes_with_detail()
self.assertEqual(200, resp.status)
- #Now check if all the Volumes created in setup are in fetched list
+ # Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index 2ecf3e8..c91e95b 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -35,7 +35,7 @@
@attr(type=['negative', 'gate'])
def test_volume_get_nonexistant_volume_id(self):
# Negative: Should not be able to get details of nonexistant volume
- #Creating a nonexistant volume id
+ # Creating a nonexistent volume id
volume_id_list = list()
resp, body = self.client.list_volumes()
for i in range(len(body)):
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index a590735..2be0c29 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -29,14 +29,14 @@
def test_create_get_delete_service(self):
# GET Service
try:
- #Creating a Service
+ # Creating a Service
name = rand_name('service-')
type = rand_name('type--')
description = rand_name('description-')
resp, service_data = self.client.create_service(
name, type, description=description)
self.assertTrue(resp['status'].startswith('2'))
- #Verifying response body of create service
+ # Verifying response body of create service
self.assertIn('id', service_data)
self.assertFalse(service_data['id'] is None)
self.assertIn('name', service_data)
@@ -45,10 +45,10 @@
self.assertEqual(type, service_data['type'])
self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
- #Get service
+ # Get service
resp, fetched_service = self.client.get_service(service_data['id'])
self.assertTrue(resp['status'].startswith('2'))
- #verifying the existence of service created
+ # Verifying the existence of the created service
self.assertIn('id', fetched_service)
self.assertEquals(fetched_service['id'], service_data['id'])
self.assertIn('name', fetched_service)
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 3a20081..6f90b04 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -240,12 +240,12 @@
self.assertEquals('200', resp['status'])
user_ids.append(user2['id'])
self.data.users.append(user2)
- #List of users for the respective tenant ID
+ # List of users for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
self.assertIn(resp['status'], ('200', '203'))
for i in body:
fetched_user_ids.append(i['id'])
- #verifying the user Id in the list
+ # Verifying the user IDs in the list
missing_users =\
[user for user in user_ids if user not in fetched_user_ids]
self.assertEqual(0, len(missing_users),
@@ -260,7 +260,7 @@
user = self.get_user_by_name(self.data.test_user)
tenant = self.get_tenant_by_name(self.data.test_tenant)
role = self.get_role_by_name(self.data.test_role)
- #Assigning roles to two users
+ # Assigning roles to two users
user_ids = list()
fetched_user_ids = list()
user_ids.append(user['id'])
@@ -277,12 +277,12 @@
second_user['id'],
role['id'])
self.assertEquals('200', resp['status'])
- #List of users with roles for the respective tenant ID
+ # List of users with roles for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
self.assertEquals('200', resp['status'])
for i in body:
fetched_user_ids.append(i['id'])
- #verifying the user Id in the list
+ # Verifying the user IDs in the list
missing_users = [missing_user for missing_user in user_ids
if missing_user not in fetched_user_ids]
self.assertEqual(0, len(missing_users),
@@ -293,13 +293,13 @@
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
# users for a nonexistant tenant
- #Assign invalid tenant ids
+ # Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
invalid_id.append('alpha')
invalid_id.append(rand_name("dddd@#%%^$"))
invalid_id.append('!@#()$%^&*?<>{}[]')
- #List the users with invalid tenant id
+ # List the users with invalid tenant id
for invalid in invalid_id:
self.assertRaises(exceptions.NotFound,
self.client.list_users_for_tenant, invalid)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 3d40eb3..9136934 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -33,7 +33,7 @@
@attr(type='smoke')
def test_list_domains(self):
- #Test to list domains
+ # Test to list domains
domain_ids = list()
fetched_ids = list()
for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index f01cc64..9f7b24b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -37,7 +37,7 @@
description=s_description)
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
- #Create endpoints so as to use for LIST and GET test cases
+ # Create endpoints for use in the LIST and GET test cases
cls.setup_endpoints = list()
for i in range(2):
region = rand_name('region')
@@ -58,7 +58,7 @@
def test_list_endpoints(self):
# Get a list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
- #Asserting LIST Endpoint
+ # Asserting LIST Endpoint
self.assertEqual(resp['status'], '200')
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
@@ -78,11 +78,11 @@
self.client.create_endpoint(self.service_id, interface, url,
region=region, enabled=True)
create_flag = True
- #Asserting Create Endpoint response body
+ # Asserting Create Endpoint response body
self.assertEqual(resp['status'], '201')
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
- #Checking if created endpoint is present in the list of endpoints
+ # Checking if created endpoint is present in the list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
for e in fetched_endpoints:
if endpoint['id'] == e['id']:
@@ -93,12 +93,12 @@
finally:
if create_flag:
matched = False
- #Deleting the endpoint created in this method
+ # Deleting the endpoint created in this method
resp_header, resp_body =\
self.client.delete_endpoint(endpoint['id'])
self.assertEqual(resp_header['status'], '204')
self.assertEqual(resp_body, '')
- #Checking whether endpoint is deleted successfully
+ # Checking whether endpoint is deleted successfully
resp, fetched_endpoints = self.client.list_endpoints()
for e in fetched_endpoints:
if endpoint['id'] == e['id']:
@@ -108,8 +108,8 @@
@attr(type='smoke')
def test_update_endpoint(self):
- #Creating an endpoint so as to check update endpoint
- #with new values
+ # Creating an endpoint so as to check the endpoint update
+ # with new values
region1 = rand_name('region')
url1 = rand_name('url')
interface1 = 'public'
@@ -117,7 +117,7 @@
self.client.create_endpoint(self.service_id, interface1,
url1, region=region1,
enabled=True)
- #Creating service so as update endpoint with new service ID
+ # Creating a service so as to update the endpoint with a new service ID
s_name = rand_name('service-')
s_type = rand_name('type--')
s_description = rand_name('description-')
@@ -125,7 +125,7 @@
self.identity_client.create_service(s_name, s_type,
description=s_description)
self.service_ids.append(self.service2['id'])
- #Updating endpoint with new values
+ # Updating endpoint with new values
region2 = rand_name('region')
url2 = rand_name('url')
interface2 = 'internal'
@@ -135,7 +135,7 @@
interface=interface2, url=url2,
region=region2, enabled=False)
self.assertEqual(resp['status'], '200')
- #Asserting if the attributes of endpoint are updated
+ # Asserting if the attributes of endpoint are updated
self.assertEqual(self.service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 681db07..737a0e0 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -29,7 +29,7 @@
@attr(type='smoke')
def test_list_policies(self):
- #Test to list policies
+ # Test to list policies
policy_ids = list()
fetched_ids = list()
for _ in range(3):
@@ -50,7 +50,7 @@
@attr(type='smoke')
def test_create_update_delete_policy(self):
- #Test to update policy
+ # Test to update policy
blob = rand_name('BlobName-')
policy_type = rand_name('PolicyType-')
resp, policy = self.policy_client.create_policy(blob, policy_type)
@@ -63,12 +63,12 @@
self.assertEqual(policy_type, policy['type'])
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertEqual(resp['status'], '200')
- #Update policy
+ # Update policy
update_type = rand_name('UpdatedPolicyType-')
resp, data = self.policy_client.update_policy(
policy['id'], type=update_type)
self.assertIn('type', data)
- #Assertion for updated value with fetched value
+ # Assertion for updated value with fetched value
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index b35b93a..bfa0d84 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -1,4 +1,4 @@
-#vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
@@ -33,7 +33,7 @@
resp, body = self.client.create_service(
name, type, description=description)
self.assertEqual('200', resp['status'])
- #Deleting the service created in this method
+ # Deleting the service created in this method
self.addCleanup(self.client.delete_service, body['id'])
s_id = body['id']
@@ -46,7 +46,7 @@
self.assertEqual('200', resp['status'])
self.assertNotEqual(resp1_desc, resp2_desc)
- #Get service
+ # Get service
resp, body = self.client.get_service(s_id)
resp3_desc = body['description']
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 04e993d..bf7a554 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -26,7 +26,7 @@
@attr(type='gate')
def test_user_update(self):
# Test case to check if updating of user attributes is successful.
- #Creating first user
+ # Creating first user
u_name = rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
@@ -36,12 +36,12 @@
email=u_email, enabled=False)
# Delete the User at the end of this method
self.addCleanup(self.v3_client.delete_user, user['id'])
- #Creating second project for updation
+ # Creating a second project for the update
resp, project = self.v3_client.create_project(
rand_name('project-'), description=rand_name('project-desc-'))
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, project['id'])
- #Updating user details with new values
+ # Updating user details with new values
u_name2 = rand_name('user2-')
u_email2 = u_name2 + '@testmail.tm'
u_description2 = u_name2 + ' description'
@@ -49,7 +49,7 @@
user['id'], name=u_name2, description=u_description2,
project_id=project['id'],
email=u_email2, enabled=False)
- #Assert response body of update user.
+ # Assert response body of update user.
self.assertEqual(200, resp.status)
self.assertEqual(u_name2, update_user['name'])
self.assertEqual(u_description2, update_user['description'])
@@ -57,9 +57,9 @@
update_user['project_id'])
self.assertEqual(u_email2, update_user['email'])
self.assertEqual('false', str(update_user['enabled']).lower())
- #GET by id after updation
+ # GET by id after the update
resp, new_user_get = self.v3_client.get_user(user['id'])
- #Assert response body of GET after updation
+ # Assert response body of GET after the update
self.assertEqual(u_name2, new_user_get['name'])
self.assertEqual(u_description2, new_user_get['description'])
self.assertEqual(project['id'],
@@ -69,14 +69,14 @@
@attr(type='gate')
def test_list_user_projects(self):
- #List the projects that a user has access upon
+ # List the projects that a user has access to
assigned_project_ids = list()
fetched_project_ids = list()
_, u_project = self.v3_client.create_project(
rand_name('project-'), description=rand_name('project-desc-'))
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, u_project['id'])
- #Create a user.
+ # Create a user.
u_name = rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
@@ -100,7 +100,7 @@
_, project = self.v3_client.get_project(project_body['id'])
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, project_body['id'])
- #Assigning roles to user on project
+ # Assigning roles to user on project
self.v3_client.assign_user_role(project['id'],
user['id'],
role['id'])
@@ -109,7 +109,7 @@
self.assertEqual(200, resp.status)
for i in body:
fetched_project_ids.append(i['id'])
- #verifying the project ids in list
+ # Verifying the project IDs in the list
missing_projects =\
[p for p in assigned_project_ids
if p not in fetched_project_ids]
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 7de7821..ad7be39 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -105,3 +105,8 @@
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
self.assertIn(image, image_list)
+
+ @attr(type=['negative', 'gate'])
+ def test_get_image_meta_by_null_id(self):
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_image_metadata, '')
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 142ad7d..d3fa763 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -38,6 +38,11 @@
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant-network_cidr
+
+ Finally, it is assumed that the following option is defined in the
+ [service_available] section of etc/tempest.conf
+
+ neutron as True
"""
@classmethod
@@ -49,9 +54,12 @@
cls.client = os.network_client
cls.networks = []
cls.subnets = []
+ cls.ports = []
@classmethod
def tearDownClass(cls):
+ for port in cls.ports:
+ cls.client.delete_port(port['id'])
for subnet in cls.subnets:
cls.client.delete_subnet(subnet['id'])
for network in cls.networks:
@@ -93,3 +101,11 @@
subnet = body['subnet']
cls.subnets.append(subnet)
return subnet
+
+ @classmethod
+ def create_port(cls, network):
+ """Wrapper utility that returns a test port."""
+ resp, body = cls.client.create_port(network['id'])
+ port = body['port']
+ cls.ports.append(port)
+ return port
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 4481853..00a8ef7 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -35,6 +35,13 @@
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
+ port create
+ port delete
+ port list
+ port show
+ port update
+ network update
+ subnet update
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
@@ -53,21 +60,28 @@
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
+ cls.port = cls.create_port(cls.network)
@attr(type='gate')
- def test_create_delete_network_subnet(self):
+ def test_create_update_delete_network_subnet(self):
# Creates a network
name = rand_name('network-')
resp, body = self.client.create_network(name)
self.assertEqual('201', resp['status'])
network = body['network']
- self.assertTrue(network['id'] is not None)
+ net_id = network['id']
+ # Verification of network update
+ new_name = "New_network"
+ resp, body = self.client.update_network(net_id, new_name)
+ self.assertEqual('200', resp['status'])
+ updated_net = body['network']
+ self.assertEqual(updated_net['name'], new_name)
# Find a cidr that is not in use yet and create a subnet with it
cidr = netaddr.IPNetwork(self.network_cfg.tenant_network_cidr)
mask_bits = self.network_cfg.tenant_network_mask_bits
for subnet_cidr in cidr.subnet(mask_bits):
try:
- resp, body = self.client.create_subnet(network['id'],
+ resp, body = self.client.create_subnet(net_id,
str(subnet_cidr))
break
except exceptions.BadRequest as e:
@@ -76,11 +90,17 @@
raise
self.assertEqual('201', resp['status'])
subnet = body['subnet']
- self.assertTrue(subnet['id'] is not None)
- #Deletes subnet and network
- resp, body = self.client.delete_subnet(subnet['id'])
+ subnet_id = subnet['id']
+ # Verification of subnet update
+ new_subnet = "New_subnet"
+ resp, body = self.client.update_subnet(subnet_id, new_subnet)
+ self.assertEqual('200', resp['status'])
+ updated_subnet = body['subnet']
+ self.assertEqual(updated_subnet['name'], new_subnet)
+ # Deletes subnet and network
+ resp, body = self.client.delete_subnet(subnet_id)
self.assertEqual('204', resp['status'])
- resp, body = self.client.delete_network(network['id'])
+ resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
@attr(type='gate')
@@ -97,8 +117,12 @@
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
networks = body['networks']
- found = any(n for n in networks if n['id'] == self.network['id'])
- self.assertTrue(found)
+ found = None
+ for n in networks:
+ if (n['id'] == self.network['id']):
+ found = n['id']
+ msg = "Network list doesn't contain created network"
+ self.assertIsNotNone(found, msg)
@attr(type='gate')
def test_show_subnet(self):
@@ -114,5 +138,57 @@
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
subnets = body['subnets']
- found = any(n for n in subnets if n['id'] == self.subnet['id'])
- self.assertTrue(found)
+ found = None
+ for n in subnets:
+ if (n['id'] == self.subnet['id']):
+ found = n['id']
+ msg = "Subnet list doesn't contain created subnet"
+ self.assertIsNotNone(found, msg)
+
+ @attr(type='gate')
+ def test_create_update_delete_port(self):
+ # Verify successful port creation, update and deletion
+ resp, body = self.client.create_port(self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ # Verification of port update
+ new_port = "New_Port"
+ resp, body = self.client.update_port(port['id'], new_port)
+ self.assertEqual('200', resp['status'])
+ updated_port = body['port']
+ self.assertEqual(updated_port['name'], new_port)
+ # Verification of port delete
+ resp, body = self.client.delete_port(port['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='gate')
+ def test_show_ports(self):
+ # Verify the details of the port
+ resp, body = self.client.show_port(self.port['id'])
+ self.assertEqual('200', resp['status'])
+ port = body['port']
+ self.assertEqual(self.port['id'], port['id'])
+
+ @attr(type='gate')
+ def test_list_ports(self):
+ # Verify the port exists in the list of all ports
+ resp, body = self.client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports_list = body['ports']
+ found = None
+ for n in ports_list:
+ if (n['id'] == self.port['id']):
+ found = n['id']
+ self.assertIsNotNone(found, "Port list doesn't contain created port")
+
+ @attr(type=['negative', 'gate'])
+ def test_show_non_existent_network(self):
+ non_exist_id = rand_name('network')
+ self.assertRaises(exceptions.NotFound, self.client.show_network,
+ non_exist_id)
+
+ @attr(type=['negative', 'gate'])
+ def test_show_non_existent_subnet(self):
+ non_exist_id = rand_name('subnet')
+ self.assertRaises(exceptions.NotFound, self.client.show_subnet,
+ non_exist_id)
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
new file mode 100644
index 0000000..ba70f34
--- /dev/null
+++ b/tempest/api/network/test_quotas.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.api.network import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class QuotasTest(base.BaseNetworkTest):
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ list quotas for tenants who have non-default quota values
+ show quotas for a specified tenant
+ update quotas for a specified tenant
+ reset quotas to default values for a specified tenant
+
+ v2.0 of the API is assumed. It is also assumed that the following
+ option is defined in the [service_available] section of etc/tempest.conf:
+
+ neutron as True
+
+ Finally, it is assumed that the per-tenant quota extension API is
+ configured in /etc/neutron/neutron.conf as follows:
+
+ quota_driver = neutron.db.quota_db.DbQuotaDriver
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(QuotasTest, cls).setUpClass()
+ admin_manager = clients.AdminManager()
+ cls.admin_client = admin_manager.network_client
+ cls.identity_admin_client = admin_manager.identity_client
+
+ @attr(type='gate')
+ def test_quotas(self):
+ # Add a tenant to conduct the test
+ test_tenant = rand_name('test_tenant_')
+ test_description = rand_name('desc_')
+ _, tenant = self.identity_admin_client.create_tenant(
+ name=test_tenant,
+ description=test_description)
+ tenant_id = tenant['id']
+ self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+ # Change quotas for tenant
+ new_quotas = {'network': 0, 'security_group': 0}
+ resp, quota_set = self.admin_client.update_quotas(tenant_id,
+ **new_quotas)
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self.admin_client.reset_quotas, tenant_id)
+ self.assertEqual(0, quota_set['network'])
+ self.assertEqual(0, quota_set['security_group'])
+ # Confirm our tenant is listed among tenants with non-default quotas
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ found = False
+ for qs in non_default_quotas:
+ if qs['tenant_id'] == tenant_id:
+ found = True
+ self.assertTrue(found)
+ # Confirm from the API that quotas were changed as requested for the tenant
+ resp, quota_set = self.admin_client.show_quotas(tenant_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(0, quota_set['network'])
+ self.assertEqual(0, quota_set['security_group'])
+ # Reset quotas to default and confirm
+ resp, body = self.admin_client.reset_quotas(tenant_id)
+ self.assertEqual('204', resp['status'])
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ for q in non_default_quotas:
+ self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
new file mode 100644
index 0000000..bc050dc
--- /dev/null
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
+#
+# Author: Joe H. Rahme <joe.hakim.rahme@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.object_storage import base
+from tempest import clients
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
+import tempest.config
+from tempest import exceptions
+from tempest.test import attr
+
+
+class AccountQuotasTest(base.BaseObjectTest):
+ accounts_quotas_available = \
+ tempest.config.TempestConfig().object_storage.accounts_quotas_available
+
+ @classmethod
+ def setUpClass(cls):
+ super(AccountQuotasTest, cls).setUpClass()
+ cls.container_name = rand_name(name="TestContainer")
+ cls.container_client.create_container(cls.container_name)
+
+ cls.data.setup_test_user()
+
+ cls.os_reselleradmin = clients.Manager(
+ cls.data.test_user,
+ cls.data.test_password,
+ cls.data.test_tenant)
+
+ # Retrieve the ResellerAdmin role id
+ reseller_role_id = None
+ try:
+ _, roles = cls.os_admin.identity_client.list_roles()
+ reseller_role_id = next(r['id'] for r in roles if r['name']
+ == 'ResellerAdmin')
+ except StopIteration:
+ msg = "No ResellerAdmin role found"
+ raise exceptions.NotFound(msg)
+
+ # Retrieve the ResellerAdmin user id
+ _, users = cls.os_admin.identity_client.get_users()
+ reseller_user_id = next(usr['id'] for usr in users if usr['name']
+ == cls.data.test_user)
+
+ # Retrieve the ResellerAdmin tenant id
+ _, tenants = cls.os_admin.identity_client.list_tenants()
+ reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
+ == cls.data.test_tenant)
+
+ # Assign the newly created user the appropriate ResellerAdmin role
+ cls.os_admin.identity_client.assign_user_role(
+ reseller_tenant_id,
+ reseller_user_id,
+ reseller_role_id)
+
+ # Retrieve a ResellerAdmin auth token and use it to set a quota
+ # on the client's account
+ cls.reselleradmin_token = cls.token_client.get_token(
+ cls.data.test_user,
+ cls.data.test_password,
+ cls.data.test_tenant)
+
+ headers = {"X-Auth-Token": cls.reselleradmin_token,
+ "X-Account-Meta-Quota-Bytes": "20"}
+
+ cls.os.custom_account_client.request("POST", "", headers, "")
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.delete_containers([cls.container_name])
+ cls.data.teardown_all()
+
+ # remove the quota from the account
+ headers = {"X-Auth-Token": cls.reselleradmin_token,
+ "X-Remove-Account-Meta-Quota-Bytes": "x"}
+
+ cls.os.custom_account_client.request("POST", "", headers, "")
+
+ super(AccountQuotasTest, cls).tearDownClass()
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type="smoke")
+ def test_upload_valid_object(self):
+ object_name = rand_name(name="TestObject")
+ data = arbitrary_string()
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+
+ self.assertEqual(resp["status"], "201")
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["negative", "smoke"])
+ def test_upload_large_object(self):
+ object_name = rand_name(name="TestObject")
+ data = arbitrary_string(30)
+ self.assertRaises(exceptions.OverLimit,
+ self.object_client.create_object,
+ self.container_name, object_name, data)
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index aaa2c64..b546cec 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -45,7 +45,7 @@
@testtools.skip('Until Bug #1069849 is resolved.')
@attr(type='gate')
def test_get_object_after_expiry_time(self):
- #TODO(harika-vakadi): similar test case has to be created for
+ # TODO(harika-vakadi): similar test case has to be created for
# "X-Delete-At", after this test case works.
# create object
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
index 7897b70..fe55ecf 100644
--- a/tempest/api/orchestration/stacks/test_instance_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -145,7 +145,7 @@
'ImageId': cls.orchestration_cfg.image_ref
})
- @attr(type='gate')
+ @attr(type='slow')
@testtools.skipIf(existing_keypair, 'Server ssh tests are disabled.')
def test_can_log_into_created_server(self):
@@ -168,7 +168,7 @@
server, 'ec2-user', pkey=self.keypair['private_key'])
self.assertTrue(linux_client.can_authenticate())
- @attr(type='gate')
+ @attr(type='slow')
def test_stack_wait_condition_data(self):
sid = self.stack_identifier
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 13d0d48..b64a324 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -88,12 +88,12 @@
@classmethod
def tearDownClass(cls):
- ## volumes deletion
+ # volumes deletion
for volume_id in cls.volume_id_list:
cls.volume_client.delete_volume(volume_id)
cls.volume_client.wait_for_resource_deletion(volume_id)
- ## volume types deletion
+ # volume types deletion
for volume_type_id in cls.volume_type_id_list:
cls.type_client.delete_volume_type(volume_type_id)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index bb0047d..379baa2 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -80,7 +80,7 @@
'available')
return snapshot
- #NOTE(afazekas): these create_* and clean_* could be defined
+ # NOTE(afazekas): these create_* and clean_* could be defined
# only in a single location in the source, and could be more general.
@classmethod
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index ee285db..39f61f3 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -34,7 +34,7 @@
volume = {}
v_name = rand_name('Volume')
metadata = {'Type': 'Test'}
- #Create a volume
+ # Create a volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata,
@@ -86,7 +86,7 @@
self.assertIn('id', volume)
self.assertIn('display_name', volume)
self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
+ # GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(fetched_volume['metadata'], {})
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index eea37e0..e2b15a4 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -32,7 +32,7 @@
@attr(type='gate')
def test_volume_get_nonexistant_volume_id(self):
# Should not be able to get a nonexistant volume
- #Creating a nonexistant volume id
+ # Creating a nonexistent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -41,7 +41,7 @@
non_exist_id = rand_name('999')
if non_exist_id not in volume_id_list:
break
- #Trying to Get a non existant volume
+ # Trying to GET a nonexistent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 00e025d..f04d23f 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -35,6 +35,9 @@
cfg.StrOpt('cli_dir',
default='/usr/local/bin/',
help="directory where python client binaries are located"),
+ cfg.IntOpt('timeout',
+ default=15,
+ help="Number of seconds to wait on a CLI timeout"),
]
CONF = cfg.CONF
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index e60e238..4c7f604 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -176,7 +176,7 @@
self.nova('list', flags='--debug')
def test_admin_timeout(self):
- self.nova('list', flags='--timeout 2')
+ self.nova('list', flags='--timeout %d' % CONF.cli.timeout)
def test_admin_timing(self):
self.nova('list', flags='--timing')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 4002081..4c7982b 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -18,9 +18,13 @@
import re
import subprocess
+from oslo.config import cfg
+
import tempest.cli
from tempest.openstack.common import log as logging
+CONF = cfg.CONF
+
LOG = logging.getLogger(__name__)
@@ -117,4 +121,4 @@
self.keystone('catalog', flags='--debug')
def test_admin_timeout(self):
- self.keystone('catalog', flags='--timeout 15')
+ self.keystone('catalog', flags='--timeout %d' % CONF.cli.timeout)
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 4860090..7b8340d 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -56,7 +56,8 @@
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
- self.neutron('dhcp-agent-list-hosting-net', params="private")
+ self.neutron('dhcp-agent-list-hosting-net',
+ params=CONF.compute.fixed_network_name)
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
diff --git a/tempest/config.py b/tempest/config.py
index 19170ae..9b1a91e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -366,6 +366,9 @@
default=5,
help="Number of seconds to wait while looping to check the"
"status of a container to container synchronization"),
+ cfg.BoolOpt('accounts_quotas_available',
+ default=True,
+ help="Set to True if the Account Quota middleware is enabled"),
]
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 446a674..2c808a9 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -23,13 +23,11 @@
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
V1 API has been removed from the code base.
- Implements the following operations for each one of the basic Neutron
+ Implements create, delete, list and show for the basic Neutron
abstractions (networks, sub-networks and ports):
- create
- delete
- list
- show
+ It also implements list, show, update and reset for OpenStack Networking
+ quotas
"""
def __init__(self, config, username, password, auth_url, tenant_name=None):
@@ -128,3 +126,64 @@
resp, body = self.get(uri, self.headers)
body = json.loads(body)
return resp, body
+
+ def update_quotas(self, tenant_id, **kwargs):
+ put_body = {'quota': kwargs}
+ body = json.dumps(put_body)
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.put(uri, body, self.headers)
+ body = json.loads(body)
+ return resp, body['quota']
+
+ def show_quotas(self, tenant_id):
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body['quota']
+
+ def reset_quotas(self, tenant_id):
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def list_quotas(self):
+ uri = '%s/quotas' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body['quotas']
+
+ def update_subnet(self, subnet_id, new_name):
+ put_body = {
+ 'subnet': {
+ 'name': new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/subnets/%s' % (self.uri_prefix, subnet_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_port(self, port_id, new_name):
+ put_body = {
+ 'port': {
+ 'name': new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/ports/%s' % (self.uri_prefix, port_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_network(self, network_id, new_name):
+ put_body = {
+ "network": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/networks/%s' % (self.uri_prefix, network_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 661763c..31642b0 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -30,7 +30,7 @@
To test installation, do the following (from the tempest/stress directory):
- ./run_stress.py etc/sample-test.json -d 30
+ ./run_stress.py etc/server-create-destroy-test.json -d 30
This sample test tries to create a few VMs and kill a few VMs.
diff --git a/tempest/stress/actions/create_destroy_server.py b/tempest/stress/actions/server_create_destroy.py
similarity index 96%
rename from tempest/stress/actions/create_destroy_server.py
rename to tempest/stress/actions/server_create_destroy.py
index 68dc148..1a1e30b 100644
--- a/tempest/stress/actions/create_destroy_server.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -16,7 +16,7 @@
import tempest.stress.stressaction as stressaction
-class CreateDestroyServerTest(stressaction.StressAction):
+class ServerCreateDestroyTest(stressaction.StressAction):
def setUp(self, **kwargs):
self.image = self.manager.config.compute.image_ref
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index 184f870..e29d9c4 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -14,20 +14,20 @@
import tempest.stress.stressaction as stressaction
-class CreateDeleteTest(stressaction.StressAction):
+class VolumeCreateDeleteTest(stressaction.StressAction):
def run(self):
name = rand_name("volume")
self.logger.info("creating %s" % name)
- resp, volume = self.manager.volumes_client.\
- create_volume(size=1, display_name=name)
+ volumes_client = self.manager.volumes_client
+ resp, volume = volumes_client.create_volume(size=1,
+ display_name=name)
assert(resp.status == 200)
vol_id = volume['id']
- status = 'available'
- self.manager.volumes_client.wait_for_volume_status(vol_id, status)
+ volumes_client.wait_for_volume_status(vol_id, 'available')
self.logger.info("created %s" % volume['id'])
self.logger.info("deleting %s" % name)
- resp, _ = self.manager.volumes_client.delete_volume(vol_id)
+ resp, _ = volumes_client.delete_volume(vol_id)
assert(resp.status == 202)
- self.manager.volumes_client.wait_for_resource_deletion(vol_id)
+ volumes_client.wait_for_resource_deletion(vol_id)
self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index c4c2041..d9b95e0 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -14,6 +14,7 @@
import logging
import multiprocessing
+import signal
import time
from tempest import clients
@@ -45,6 +46,7 @@
# add the handler to the root logger
logger = logging.getLogger('tempest.stress')
logger.addHandler(_console)
+processes = []
def do_ssh(command, host):
@@ -93,10 +95,29 @@
return None
-def stress_openstack(tests, duration, max_runs=None):
+def sigchld_handler(signal, frame):
+ """
+ Signal handler (only active if stop_on_error is True).
+ """
+ terminate_all_processes()
+
+
+def terminate_all_processes():
+ """
+ Goes through the process list and terminates all child processes.
+ """
+ for process in processes:
+ if process['process'].is_alive():
+ try:
+ process['process'].terminate()
+ except Exception:
+ pass
+ process['process'].join()
+
+
+def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
"""
Workload driver. Executes an action function against a nova-cluster.
-
"""
logfiles = admin_manager.config.stress.target_logfiles
log_check_interval = int(admin_manager.config.stress.log_check_interval)
@@ -105,7 +126,6 @@
computes = _get_compute_nodes(controller)
for node in computes:
do_ssh("rm -f %s" % logfiles, node)
- processes = []
for test in tests:
if test.get('use_admin', False):
manager = admin_manager
@@ -127,7 +147,7 @@
tenant_name=tenant_name)
test_obj = importutils.import_class(test['action'])
- test_run = test_obj(manager, logger, max_runs)
+ test_run = test_obj(manager, logger, max_runs, stop_on_error)
kwargs = test.get('kwargs', {})
test_run.setUp(**dict(kwargs.iteritems()))
@@ -150,6 +170,9 @@
processes.append(process)
p.start()
+ if stop_on_error:
+ # NOTE(mkoderer): only the parent should register the handler
+ signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
while True:
@@ -168,6 +191,11 @@
break
time.sleep(min(remaining, log_check_interval))
+ if stop_on_error:
+ for process in processes:
+ if process['statistic']['fails'] > 0:
+ break
+
if not logfiles:
continue
errors = _error_in_logs(logfiles, computes)
@@ -175,10 +203,7 @@
had_errors = True
break
- for process in processes:
- if process['process'].is_alive():
- process['process'].terminate()
- process['process'].join()
+ terminate_all_processes()
sum_fails = 0
sum_runs = 0
diff --git a/tempest/stress/etc/sample-test.json b/tempest/stress/etc/sample-test.json
deleted file mode 100644
index 494c823..0000000
--- a/tempest/stress/etc/sample-test.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[{"action": "tempest.stress.actions.create_destroy_server.CreateDestroyServerTest",
- "threads": 8,
- "use_admin": false,
- "use_isolated_tenants": false,
- "kwargs": {}
- }
-]
diff --git a/tempest/stress/etc/server-create-destroy-test.json b/tempest/stress/etc/server-create-destroy-test.json
new file mode 100644
index 0000000..17d5e1a
--- /dev/null
+++ b/tempest/stress/etc/server-create-destroy-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ }
+]
diff --git a/tempest/stress/etc/stress-tox-job.json b/tempest/stress/etc/stress-tox-job.json
index 3534c26..dffc469 100644
--- a/tempest/stress/etc/stress-tox-job.json
+++ b/tempest/stress/etc/stress-tox-job.json
@@ -1,10 +1,10 @@
-[{"action": "tempest.stress.actions.create_destroy_server.CreateDestroyServerTest",
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
"threads": 8,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
},
- {"action": "tempest.stress.actions.volume_create_delete.CreateDeleteTest",
+ {"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
diff --git a/tempest/stress/etc/volume-assign-delete-test.json b/tempest/stress/etc/volume-attach-delete-test.json
similarity index 100%
rename from tempest/stress/etc/volume-assign-delete-test.json
rename to tempest/stress/etc/volume-attach-delete-test.json
diff --git a/tempest/stress/etc/volume-create-delete-test.json b/tempest/stress/etc/volume-create-delete-test.json
index 6325bdc..e8a58f7 100644
--- a/tempest/stress/etc/volume-create-delete-test.json
+++ b/tempest/stress/etc/volume-create-delete-test.json
@@ -1,4 +1,4 @@
-[{"action": "tempest.stress.actions.volume_create_delete.CreateDeleteTest",
+[{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index 106049d..32e3ae0 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -22,7 +22,7 @@
def main(ns):
- #NOTE(kodererm): moved import to make "-h" possible without OpenStack
+ # NOTE(mkoderer): moved import to make "-h" possible without OpenStack
from tempest.stress import driver
result = 0
tests = json.load(open(ns.tests, 'r'))
@@ -30,12 +30,13 @@
for test in tests:
step_result = driver.stress_openstack([test],
ns.duration,
- ns.number)
- #NOTE(kodererm): we just save the last result code
+ ns.number,
+ ns.stop)
+ # NOTE(mkoderer): we just save the last result code
if (step_result != 0):
result = step_result
else:
- driver.stress_openstack(tests, ns.duration, ns.number)
+ driver.stress_openstack(tests, ns.duration, ns.number, ns.stop)
return result
@@ -44,6 +45,8 @@
help="Duration of test in secs.")
parser.add_argument('-s', '--serial', action='store_true',
help="Trigger running tests serially.")
+parser.add_argument('-S', '--stop', action='store_true',
+ default=False, help="Stop on first error.")
parser.add_argument('-n', '--number', type=int,
help="How often an action is executed for each process.")
parser.add_argument('tests', help="Name of the file with test description.")
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 77ddd1c..ab09adc 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -20,10 +20,11 @@
class StressAction(object):
- def __init__(self, manager, logger, max_runs=None):
+ def __init__(self, manager, logger, max_runs=None, stop_on_error=False):
self.manager = manager
self.logger = logger
self.max_runs = max_runs
+ self.stop_on_error = stop_on_error
def _shutdown_handler(self, signal, frame):
self.tearDown()
@@ -63,6 +64,11 @@
self.logger.exception("Failure in run")
finally:
shared_statistic['runs'] += 1
+ if self.stop_on_error and (shared_statistic['fails'] > 1):
+ self.logger.warn("Stop process due to"
+ "\"stop-on-error\" argument")
+ self.tearDown()
+ sys.exit(1)
def run(self):
"""This method is where the stress test code runs."""
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index ba627e3..8812a10 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -170,7 +170,7 @@
add_cls = getattr(add_cls, part)
-#TODO(afazekas): classmethod handling
+# TODO(afazekas): classmethod handling
def friendly_function_name_simple(call_able):
name = ""
if hasattr(call_able, "im_class"):
@@ -224,7 +224,7 @@
"""Cancel Clean up request."""
del cls._resource_trash_bin[key]
- #TODO(afazekas): Add "with" context handling
+ # TODO(afazekas): Add "with" context handling
def assertBotoError(self, excMatcher, callableObj,
*args, **kwargs):
"""Example usage:
@@ -272,7 +272,7 @@
s3_error_code.server = ServerError()
s3_error_code.client = ClientError()
valid_image_state = set(('available', 'pending', 'failed'))
- #NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
+ # NOTE(afazekas): 'paused' is not a valid status in EC2, but it does not have
# a good mapping, because it uses memory, but not really a running machine
valid_instance_state = set(('pending', 'running', 'shutting-down',
'terminated', 'stopping', 'stopped', 'paused'))
@@ -380,7 +380,7 @@
def assertAddressReleasedWait(self, address):
def _address_delete():
- #NOTE(afazekas): the filter gives back IP
+ # NOTE(afazekas): the filter gives back IP
# even if it is not associated to my tenant
if (address.public_ip not in map(lambda a: a.public_ip,
self.ec2_client.get_all_addresses())):
@@ -448,7 +448,7 @@
if cls.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
return "_GONE"
- #NOTE(afazekas): incorrect code,
+ # NOTE(afazekas): incorrect code,
# but the resource must be destoreyd
if exc.error_code == "InstanceNotFound":
return "_GONE"
@@ -465,7 +465,7 @@
if exc_num:
raise exceptions.TearDownException(num=exc_num)
- #NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
+ # NOTE(afazekas): The incorrect ErrorCodes make it very, very difficult
# to write better teardown
@classmethod
@@ -473,7 +473,7 @@
"""Delete group.
Use just for teardown!
"""
- #NOTE(afazekas): should wait/try until all related instance terminates
+ # NOTE(afazekas): should wait/try until all related instances terminate
group.delete()
@classmethod
@@ -487,7 +487,7 @@
LOG.critical("%s Volume has %s snapshot(s)", volume.id,
map(snaps.id, snaps))
- #Note(afazekas): detaching/attching not valid EC2 status
+ # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
def _volume_state():
volume.update(validate=True)
try:
@@ -495,7 +495,7 @@
volume.detach(force=True)
except BaseException as exc:
LOG.exception(exc)
- #exc_num += 1 "nonlocal" not in python2
+ # exc_num += 1 "nonlocal" not in python2
return volume.status
try:
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index df2ff6a..5007503 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -231,7 +231,7 @@
else:
self.assertNotEqual(instance.state, "running")
- #NOTE(afazekas): doctored test case,
+ # NOTE(afazekas): doctored test case,
# with normal validation it would fail
@testtools.skip("Until Bug #1182679 is fixed")
@attr(type='smoke')
@@ -277,10 +277,10 @@
self.assertTrue(address.associate(instance.id))
rcuk_da = self.addResourceCleanUp(address.disassociate)
- #TODO(afazekas): ping test. dependecy/permission ?
+ # TODO(afazekas): ping test. dependency/permission?
self.assertVolumeStatusWait(volume, "available")
- #NOTE(afazekas): it may be reports availble before it is available
+ # NOTE(afazekas): it may report available before it is actually available
ssh = RemoteClient(address.public_ip,
self.os.config.compute.ssh_user,
@@ -304,7 +304,7 @@
self.assertVolumeStatusWait(_volume_state, "in-use")
re_search_wait(_volume_state, "in-use")
- #NOTE(afazekas): Different Hypervisor backends names
+ # NOTE(afazekas): Different Hypervisor backends names
# differently the devices,
# now we just test is the partition number increased/decrised
@@ -319,7 +319,7 @@
state_wait(_part_state, 'INCREASE')
part_lines = ssh.get_partitions().split('\n')
- #TODO(afazekas): Resource compare to the flavor settings
+ # TODO(afazekas): Resource compare to the flavor settings
volume.detach()
@@ -340,7 +340,7 @@
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
- #TODO(afazekas): move steps from teardown to the test case
+ # TODO(afazekas): move steps from teardown to the test case
-#TODO(afazekas): Snapshot/volume read/write test case
+# TODO(afazekas): Snapshot/volume read/write test case
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
index 1072356..1b4d7ec 100644
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ b/tempest/thirdparty/boto/test_ec2_keys.py
@@ -37,7 +37,7 @@
cls.client = cls.os.ec2api_client
cls.ec = cls.ec2_error_code
-#TODO(afazekas): merge create, delete, get test cases
+# TODO(afazekas): merge create, delete, get test cases
@attr(type='smoke')
def test_create_ec2_keypair(self):
# EC2 create KeyPair
diff --git a/tempest/thirdparty/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
index f4602d8..6226dbb 100644
--- a/tempest/thirdparty/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -30,7 +30,7 @@
cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
-#Note(afazekas): these tests for things duable without an instance
+ # NOTE(afazekas): these tests are for things doable without an instance
@testtools.skip("Skipped until the Bug #1080406 is resolved")
@attr(type='smoke')
def test_disassociate_not_associated_floating_ip(self):
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
index 3db9a88..81ddcf6 100644
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -43,7 +43,7 @@
group_get = groups_get[0]
self.assertEqual(group.name, group_get.name)
self.assertEqual(group.name, group_get.name)
- #ping (icmp_echo) and other icmp allowed from everywhere
+ # ping (icmp_echo) and other icmp allowed from everywhere
# from_port and to_port act as icmp type
success = self.client.authorize_security_group(group_name,
ip_protocol="icmp",
@@ -51,17 +51,17 @@
from_port=-1,
to_port=-1)
self.assertTrue(success)
- #allow standard ssh port from anywhere
+ # allow standard ssh port from anywhere
success = self.client.authorize_security_group(group_name,
ip_protocol="tcp",
cidr_ip="0.0.0.0/0",
from_port=22,
to_port=22)
self.assertTrue(success)
- #TODO(afazekas): Duplicate tests
+ # TODO(afazekas): Duplicate tests
group_get = self.client.get_all_security_groups(
groupnames=(group_name,))[0]
- #remove listed rules
+ # remove listed rules
for ip_permission in group_get.rules:
for cidr in ip_permission.grants:
self.assertTrue(self.client.revoke_security_group(group_name,
@@ -72,5 +72,5 @@
group_get = self.client.get_all_security_groups(
groupnames=(group_name,))[0]
- #all rules shuld be removed now
+ # all rules should be removed now
self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index e2ca15f..26c2701 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -59,7 +59,7 @@
image["image_id"] = self.images_client.register_image(
name=image["name"],
image_location=image["location"])
- #Note(afazekas): delete_snapshot=True might trigger boto lib? bug
+ # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
image["cleanUp"] = self.addResourceCleanUp(
self.images_client.deregister_image,
image["image_id"])
@@ -119,4 +119,4 @@
self.images_client.deregister_image(image["image_id"])
self.cancelResourceCleanUp(image["cleanUp"])
-#TODO(afazekas): less copy-paste style
+# TODO(afazekas): less copy-paste style
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
index d8fca3b..1507deb 100644
--- a/tempest/thirdparty/boto/utils/wait.py
+++ b/tempest/thirdparty/boto/utils/wait.py
@@ -34,7 +34,7 @@
def state_wait(lfunction, final_set=set(), valid_set=None):
- #TODO(afazekas): evaluate using ABC here
+ # TODO(afazekas): evaluate using ABC here
if not isinstance(final_set, set):
final_set = set((final_set,))
if not isinstance(valid_set, set) and valid_set is not None:
@@ -112,7 +112,7 @@
time.sleep(default_check_interval)
-#NOTE(afazekas): EC2/boto normally raise exception instead of empty list
+# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
def wait_exception(lfunction):
"""Returns with the exception or raises one."""
start_time = time.time()
@@ -129,4 +129,4 @@
dtime)
time.sleep(default_check_interval)
-#TODO(afazekas): consider strategy design pattern..
+# TODO(afazekas): consider strategy design pattern..
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index 2694b95..1c1cdeb 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -25,7 +25,6 @@
@classmethod
def setUpClass(cls):
- raise cls.skipException("Until Bug 1034129 is fixed")
super(ServersWhiteboxTest, cls).setUpClass()
#NOTE(afazekas): Strange relationship
BaseIdentityAdminTest.setUpClass()
@@ -80,7 +79,7 @@
stmt = instances.select().where(instances.c.uuid == server['id'])
result = self.connection.execute(stmt).first()
- self.assertEqual(1, result.deleted)
+ self.assertTrue(result.deleted > 0)
self.assertEqual('deleted', result.vm_state)
self.assertEqual(None, result.task_state)
except Exception:
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
new file mode 100755
index 0000000..490d263
--- /dev/null
+++ b/tools/pretty_tox_serial.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+TESTRARGS=$@
+
+if [ ! -d .testrepository ]; then
+ testr init
+fi
+testr run --subunit $TESTRARGS | subunit-2to1 | tools/colorizer.py
+testr slowest
diff --git a/tox.ini b/tox.ini
index 93a53ac..c3562e6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -26,11 +26,17 @@
commands =
nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/api tempest/scenario tempest/thirdparty tempest/cli
+[testenv:testr-serial]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+ sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+
[testenv:testr-full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- sh tools/pretty_tox.sh 'tempest.api tempest.scenario tempest.thirdparty tempest.cli {posargs}'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
[testenv:smoke]
sitepackages = True