Merge "Improve cinder CLI existing tests"
diff --git a/README.rst b/README.rst
index 4393ae9..ea36619 100644
--- a/README.rst
+++ b/README.rst
@@ -130,3 +130,57 @@
unittest2.TestSuite instead. See:
https://code.google.com/p/unittest-ext/issues/detail?id=79
+
+Branchless Tempest Considerations
+---------------------------------
+
+Starting with the OpenStack Icehouse release Tempest no longer has any stable
+branches. This is to better ensure API consistency between releases because
+the API behavior should not change between releases. This means that the stable
+branches are also gated by the Tempest master branch, which also means that
+proposed commits to Tempest must work against both the master and all the
+currently supported stable branches of the projects. As such there are a few
+special considerations that have to be accounted for when pushing new changes
+to Tempest.
+
+1. New Tests for new features
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When adding tests for new features that were not in previous releases of the
+projects the new test has to be properly skipped with a feature flag. This
+may be as simple as using the @test.requires_ext() decorator to check
+if the required extension (or discoverable optional API) is enabled, or adding
+a new config option to the appropriate section. If there isn't a method of
+selecting the new **feature** from the config file then there won't be a
+mechanism to disable the test with older stable releases and the new test won't
+be able to merge.
+
+2. Bug fix on core project needing Tempest changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When trying to land a bug fix which changes a tested API you'll have to use the
+following procedure::
+
+ - Propose change to the project, get a +2 on the change even with failing
+ - Propose a skip on Tempest which will only be approved after the
+ corresponding change in the project has a +2
+ - Land project change in master and all open stable branches (if required)
+ - Land changed test in Tempest
+
+Otherwise the bug fix won't be able to land in the project.
+
+3. New Tests for existing features
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If a test is being added for a feature that exists in all the current releases
+of the projects then the only concern is that the API behavior is the same
+across all the versions of the project being tested. If the behavior is not
+consistent the test will not be able to merge.
+
+API Stability
+-------------
+
+For new tests being added to Tempest the assumption is that the API being
+tested is considered stable and adheres to the OpenStack API stability
+guidelines. If an API is still considered experimental or in development then
+it should not be tested by Tempest until it is considered stable.
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 9051310..858fce9 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -109,6 +109,9 @@
# value)
#driver_enabled=false
+# Driver name which Ironic uses (string value)
+#driver=fake
+
# The endpoint type to use for the baremetal provisioning
# service (string value)
#endpoint_type=publicURL
@@ -119,13 +122,13 @@
# Timeout for association of Nova instance and Ironic node
# (integer value)
-#association_timeout=10
+#association_timeout=30
# Timeout for Ironic power transitions. (integer value)
-#power_timeout=20
+#power_timeout=60
# Timeout for unprovisioning an Ironic node. (integer value)
-#unprovision_timeout=20
+#unprovision_timeout=60
[boto]
@@ -424,6 +427,10 @@
# as [nova.rdp]->enabled in nova.conf (boolean value)
#rdp_console=false
+# Does the test environment support instance rescue mode?
+# (boolean value)
+#rescue=true
+
[dashboard]
@@ -736,6 +743,11 @@
# all which indicates every extension is enabled (list value)
#api_extensions=all
+# Allow the execution of IPv6 subnet tests that use the
+# extended IPv6 attributes ipv6_ra_mode and ipv6_address_mode
+# (boolean value)
+#ipv6_subnet_attributes=false
+
[object-storage]
@@ -841,6 +853,28 @@
# queues (integer value)
#max_queues_per_page=20
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata=65536
+
+# The maximum number of queue message per page when listing
+# (or) posting messages (integer value)
+#max_messages_per_page=20
+
+# The maximum size of a message body (integer value)
+#max_message_size=262144
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim=20
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl=1209600
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl=43200
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace=43200
+
[scenario]
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
index 445ca60..5e1e310 100644
--- a/tempest/api/baremetal/test_drivers.py
+++ b/tempest/api/baremetal/test_drivers.py
@@ -13,14 +13,23 @@
# under the License.
from tempest.api.baremetal import base
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class TestDrivers(base.BaseBaremetalTest):
"""Tests for drivers."""
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(TestDrivers, cls).setUpClass()
+ cls.driver_name = CONF.baremetal.driver
@test.attr(type="smoke")
def test_list_drivers(self):
resp, drivers = self.client.list_drivers()
self.assertEqual('200', resp['status'])
- self.assertIn('fake', [d['name'] for d in drivers['drivers']])
+ self.assertIn(self.driver_name,
+ [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/baremetal/test_nodes.py b/tempest/api/baremetal/test_nodes.py
index b6432ad..1572840 100644
--- a/tempest/api/baremetal/test_nodes.py
+++ b/tempest/api/baremetal/test_nodes.py
@@ -87,3 +87,11 @@
resp, node = self.client.show_node(node['uuid'])
self.assertEqual('200', resp['status'])
self._assertExpected(new_p, node['properties'])
+
+ @test.attr(type='smoke')
+ def test_validate_driver_interface(self):
+ resp, body = self.client.validate_driver_interface(self.node['uuid'])
+ self.assertEqual('200', resp['status'])
+ core_interfaces = ['power', 'deploy']
+ for interface in core_interfaces:
+ self.assertIn(interface, body)
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index c2af29a..4ac7e29 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -145,6 +145,19 @@
self.validate_self_link('ports', port['uuid'],
port['links'][0]['href'])
+ def test_list_ports_details_with_address(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ self.create_port(node_id=node_id, address=address)
+ for i in range(0, 5):
+ self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+
+ resp, body = self.client.list_ports_detail(address=address)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(1, len(body['ports']))
+ self.assertEqual(address, body['ports'][0]['address'])
+
@test.attr(type='smoke')
def test_update_port_replace(self):
node_id = self.node['uuid']
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index e1dc685..f147b9c 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -44,6 +44,7 @@
# TODO(afazekas): Add dedicated tenant to the skiped quota tests
# it can be moved into the setUpClass as well
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
@@ -57,8 +58,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
@@ -72,8 +74,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
@@ -86,7 +89,7 @@
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
@test.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 8b3a0b5..cccaf13 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -54,6 +54,9 @@
flavor_id = data_utils.rand_int_id(start=1000)
return flavor_id
+ @test.skip_because(bug="1298131")
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
flavor_name = data_utils.rand_name("flavor-")
@@ -67,11 +70,14 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.OverLimit,
+ self.assertRaises(exceptions.Unauthorized,
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
+ @test.skip_because(bug="1298131")
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
flavor_name = data_utils.rand_name("flavor-")
@@ -85,7 +91,7 @@
ram, vcpus, disk,
flavor_id)
self.addCleanup(self.flavors_client.delete_flavor, flavor_id)
- self.assertRaises(exceptions.OverLimit,
+ self.assertRaises(exceptions.Unauthorized,
self.client.resize,
self.servers[0]['id'],
flavor_ref['id'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 7c70aec..a1aaa95 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -270,7 +270,7 @@
if not policy:
policy = ['affinity']
resp, body = cls.servers_client.create_server_group(name, policy)
- cls.server_groups.append(body)
+ cls.server_groups.append(body['id'])
return resp, body
def wait_for(self, condition):
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 0e6b9d6..c1c2d05 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -89,44 +89,40 @@
@test.attr(type='gate')
def test_list_flavors_detailed_filter_by_min_disk(self):
# The detailed list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
+ resp, flavor = self.client.get_flavor_details(self.flavor_ref)
+ flavor_id = flavor['id']
- params = {self._min_disk: flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavor['disk'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@test.attr(type='gate')
def test_list_flavors_detailed_filter_by_min_ram(self):
# The detailed list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
+ resp, flavor = self.client.get_flavor_details(self.flavor_ref)
+ flavor_id = flavor['id']
- params = {self._min_ram: flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavor['ram'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@test.attr(type='gate')
def test_list_flavors_filter_by_min_disk(self):
# The list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
+ resp, flavor = self.client.get_flavor_details(self.flavor_ref)
+ flavor_id = flavor['id']
- params = {self._min_disk: flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavor['disk'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@test.attr(type='gate')
def test_list_flavors_filter_by_min_ram(self):
# The list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
+ resp, flavor = self.client.get_flavor_details(self.flavor_ref)
+ flavor_id = flavor['id']
- params = {self._min_ram: flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavor['ram'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index e135eca..279dc51 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -102,6 +102,28 @@
self.password)
self.assertTrue(linux_client.hostname_equals_servername(self.name))
+ @test.skip_because(bug="1306367", interface="xml")
+ @test.attr(type='gate')
+ def test_create_server_with_scheduler_hint_group(self):
+ # Create a server with the scheduler hint "group".
+ name = data_utils.rand_name('server_group')
+ policies = ['affinity']
+ resp, body = self.client.create_server_group(name=name,
+ policies=policies)
+ self.assertEqual(200, resp.status)
+ group_id = body['id']
+ self.addCleanup(self.client.delete_server_group, group_id)
+
+ hints = {'group': group_id}
+ resp, server = self.create_test_server(sched_hints=hints,
+ wait_until='ACTIVE')
+ self.assertEqual(202, resp.status)
+
+ # Check a server is in the group
+ resp, server_group = self.client.get_server_group(group_id)
+ self.assertEqual(200, resp.status)
+ self.assertIn(server['id'], server_group['members'])
+
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index d0fd876..71fcbff 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+import logging
import testtools
import urlparse
@@ -27,6 +28,8 @@
CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
class ServerActionsTestJSON(base.BaseV2ComputeTest):
run_ssh = CONF.compute.run_ssh
@@ -267,7 +270,14 @@
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
- self.os.image_client.delete_image(oldest_backup)
+ try:
+ self.os.image_client.delete_image(oldest_backup)
+ except exceptions.NotFound:
+ pass
+ else:
+ LOG.warning("Deletion of oldest backup %s should not have "
+ "been successful as it should have been "
+ "deleted during rotation." % oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 0cd23fd..f1ef5d5 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -75,6 +75,7 @@
policy = ['anti-affinity']
self._create_delete_server_group(policy)
+ @test.skip_because(bug="1324348")
@test.attr(type='gate')
def test_create_delete_server_group_with_multiple_policies(self):
# Create and Delete the server-group with multiple policies
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 448b8ff..01ff6b9 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -24,10 +24,7 @@
super(ServerMetadataTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
-
cls.server_id = server['id']
def setUp(self):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 093e9e2..ab98d88 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -15,14 +15,21 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class ServerRescueTestJSON(base.BaseV2ComputeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
+
cls.set_network_resources(network=True, subnet=True, router=True)
super(ServerRescueTestJSON, cls).setUpClass()
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index dae4709..b35e55c 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -28,6 +28,10 @@
@classmethod
@test.safe_setup
def setUpClass(cls):
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
+
cls.set_network_resources(network=True, subnet=True, router=True)
super(ServerRescueNegativeTestJSON, cls).setUpClass()
cls.device = 'vdf'
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 6343ead..d3297ce 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -106,6 +106,8 @@
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv6=IPv6)
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
@@ -114,6 +116,8 @@
self.client.resize,
nonexistent_server, self.flavor_ref)
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
@@ -121,6 +125,8 @@
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 375ddf8..fb8ded3 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import StringIO
+
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
@@ -27,9 +29,10 @@
class AuthorizationTestJSON(base.BaseV2ComputeTest):
-
@classmethod
def setUpClass(cls):
+ if not CONF.service_available.glance:
+ raise cls.skipException('Glance is not available.')
# No network resources required for this test
cls.set_network_resources()
super(AuthorizationTestJSON, cls).setUpClass()
@@ -38,6 +41,7 @@
raise cls.skipException(msg)
cls.client = cls.os.servers_client
cls.images_client = cls.os.images_client
+ cls.glance_client = cls.os.image_client
cls.keypairs_client = cls.os.keypairs_client
cls.security_client = cls.os.security_groups_client
@@ -57,9 +61,14 @@
resp, cls.server = cls.client.get_server(server['id'])
name = data_utils.rand_name('image')
- resp, body = cls.images_client.create_image(server['id'], name)
- image_id = data_utils.parse_image_id(resp['location'])
- cls.images_client.wait_for_image_status(image_id, 'ACTIVE')
+ resp, body = cls.glance_client.create_image(name=name,
+ container_format='bare',
+ disk_format='raw',
+ is_public=False)
+ image_id = body['id']
+ image_file = StringIO.StringIO(('*' * 1024))
+ resp, body = cls.glance_client.update_image(image_id, data=image_file)
+ cls.glance_client.wait_for_image_status(image_id, 'active')
resp, cls.image = cls.images_client.get_image(image_id)
cls.keypairname = data_utils.rand_name('keypair')
diff --git a/tempest/api/compute/v3/admin/test_flavors.py b/tempest/api/compute/v3/admin/test_flavors.py
index 8a4e3cf..09d76b8 100644
--- a/tempest/api/compute/v3/admin/test_flavors.py
+++ b/tempest/api/compute/v3/admin/test_flavors.py
@@ -294,7 +294,7 @@
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
- ram = " 1024 "
+ ram = "1024"
resp, flavor = self.client.create_flavor(flavor_name,
ram, self.vcpus,
self.disk,
diff --git a/tempest/api/compute/v3/admin/test_flavors_negative.py b/tempest/api/compute/v3/admin/test_flavors_negative.py
index 3f8a2da..6d3308e 100644
--- a/tempest/api/compute/v3/admin/test_flavors_negative.py
+++ b/tempest/api/compute/v3/admin/test_flavors_negative.py
@@ -57,7 +57,7 @@
resp, flavor = self.client.create_flavor(flavor_name,
self.ram,
self.vcpus, self.disk,
- '',
+ None,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index 307462f..7739f09 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -34,6 +34,7 @@
# TODO(afazekas): Add dedicated tenant to the skiped quota tests
# it can be moved into the setUpClass as well
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
@@ -47,8 +48,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
@@ -62,7 +64,7 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
@@ -71,6 +73,7 @@
self.demo_tenant_id,
ram=0)
+ @test.skip_because(bug="1298131")
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
@@ -83,4 +86,4 @@
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
- self.assertRaises(exceptions.OverLimit, self.create_test_server)
+ self.assertRaises(exceptions.Unauthorized, self.create_test_server)
diff --git a/tempest/api/compute/v3/servers/test_instance_actions.py b/tempest/api/compute/v3/servers/test_instance_actions.py
index 399541b..64339b8 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions.py
@@ -40,12 +40,10 @@
self.assertTrue(any([i for i in body if i['action'] == 'create']))
self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
- @test.skip_because(bug="1206032")
@test.attr(type='gate')
- @test.skip_because(bug="1281915")
def test_get_server_action(self):
# Get the action details of the provided server
- request_id = self.resp['x-compute-request-id']
+ request_id = self.resp['x-openstack-request-id']
resp, body = self.client.get_server_action(self.server_id,
request_id)
self.assertEqual(200, resp.status)
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index e098311..3ee8050 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+
import testtools
import urlparse
@@ -25,6 +27,8 @@
CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
class ServerActionsV3Test(base.BaseV3ComputeTest):
run_ssh = CONF.compute.run_ssh
@@ -260,7 +264,14 @@
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
- self.images_client.delete_image(oldest_backup)
+ try:
+ self.images_client.delete_image(oldest_backup)
+ except exceptions.NotFound:
+ pass
+ else:
+ LOG.warning("Deletion of oldest backup %s should not have "
+ "been successful as it should have been "
+ "deleted during rotation." % oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
diff --git a/tempest/api/compute/v3/servers/test_server_metadata.py b/tempest/api/compute/v3/servers/test_server_metadata.py
index 298cd3c..c5443ee 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata.py
@@ -24,9 +24,7 @@
super(ServerMetadataV3Test, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
-
cls.server_id = server['id']
def setUp(self):
diff --git a/tempest/api/compute/v3/servers/test_server_rescue.py b/tempest/api/compute/v3/servers/test_server_rescue.py
index b3dcb51..da58f26 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue.py
@@ -14,13 +14,19 @@
# under the License.
from tempest.api.compute import base
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class ServerRescueV3Test(base.BaseV3ComputeTest):
@classmethod
def setUpClass(cls):
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
super(ServerRescueV3Test, cls).setUpClass()
# Server for positive tests
diff --git a/tempest/api/compute/v3/servers/test_server_rescue_negative.py b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
index eb6bcdd..5eb6c9a 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
@@ -28,6 +28,10 @@
@classmethod
@test.safe_setup
def setUpClass(cls):
+ if not CONF.compute_feature_enabled.rescue:
+ msg = "Server rescue not available."
+ raise cls.skipException(msg)
+
super(ServerRescueNegativeV3Test, cls).setUpClass()
cls.device = 'vdf'
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 0d6773c..ab0e83a 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -29,8 +29,8 @@
if not CONF.service_available.sahara:
raise cls.skipException('Sahara support is required')
- os = cls.get_client_manager()
- cls.client = os.data_processing_client
+ cls.os = cls.get_client_manager()
+ cls.client = cls.os.data_processing_client
cls.flavor_ref = CONF.compute.flavor_ref
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
new file mode 100644
index 0000000..689c1fe
--- /dev/null
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class JobBinaryTest(dp_base.BaseDataProcessingTest):
+ """Link to the API documentation is http://docs.openstack.org/developer/
+ sahara/restapi/rest_api_v1.1_EDP.html#job-binaries
+ """
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(JobBinaryTest, cls).setUpClass()
+ cls.swift_job_binary_with_extra = {
+ 'url': 'swift://sahara-container.sahara/example.jar',
+ 'description': 'Test job binary',
+ 'extra': {
+ 'user': cls.os.credentials.username,
+ 'password': cls.os.credentials.password
+ }
+ }
+ # Create extra cls.swift_job_binary variable to use for comparison to
+ # job binary response body because response body has no 'extra' field.
+ cls.swift_job_binary = cls.swift_job_binary_with_extra.copy()
+ del cls.swift_job_binary['extra']
+
+ name = data_utils.rand_name('sahara-internal-job-binary')
+ cls.job_binary_data = 'Some script may be data'
+ job_binary_internal = cls.create_job_binary_internal(
+ name, cls.job_binary_data)[1]
+ cls.internal_db_job_binary = {
+ 'url': 'internal-db://%s' % job_binary_internal['id'],
+ 'description': 'Test job binary',
+ }
+
+ def _create_job_binary(self, binary_body, binary_name=None):
+ """Creates Job Binary with optional name specified.
+
+ It creates a link to data (jar, pig files, etc.) and ensures response
+ status, job binary name and response body. Returns id and name of
+ created job binary. Data may not exist when using Swift
+ as data storage. In other cases data must exist in storage.
+ """
+ if not binary_name:
+ # generate random name if it's not specified
+ binary_name = data_utils.rand_name('sahara-job-binary')
+
+ # create job binary
+ resp, body = self.create_job_binary(binary_name, **binary_body)
+
+ # ensure that binary created successfully
+ self.assertEqual(202, resp.status)
+ self.assertEqual(binary_name, body['name'])
+ if 'swift' in binary_body['url']:
+ binary_body = self.swift_job_binary
+ self.assertDictContainsSubset(binary_body, body)
+
+ return body['id'], binary_name
+
+ @test.attr(type='smoke')
+ def test_swift_job_binary_create(self):
+ self._create_job_binary(self.swift_job_binary_with_extra)
+
+ @test.attr(type='smoke')
+ def test_swift_job_binary_list(self):
+ binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
+
+ # check for job binary in list
+ resp, binaries = self.client.list_job_binaries()
+ self.assertEqual(200, resp.status)
+ binaries_info = [(binary['id'], binary['name']) for binary in binaries]
+ self.assertIn(binary_info, binaries_info)
+
+ @test.attr(type='smoke')
+ def test_swift_job_binary_get(self):
+ binary_id, binary_name = self._create_job_binary(
+ self.swift_job_binary_with_extra)
+
+ # check job binary fetch by id
+ resp, binary = self.client.get_job_binary(binary_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(binary_name, binary['name'])
+ self.assertDictContainsSubset(self.swift_job_binary, binary)
+
+ @test.attr(type='smoke')
+ def test_swift_job_binary_delete(self):
+ binary_id = self._create_job_binary(
+ self.swift_job_binary_with_extra)[0]
+
+ # delete the job binary by id
+ resp = self.client.delete_job_binary(binary_id)[0]
+ self.assertEqual(204, resp.status)
+
+ @test.attr(type='smoke')
+ def test_internal_db_job_binary_create(self):
+ self._create_job_binary(self.internal_db_job_binary)
+
+ @test.attr(type='smoke')
+ def test_internal_db_job_binary_list(self):
+ binary_info = self._create_job_binary(self.internal_db_job_binary)
+
+ # check for job binary in list
+ resp, binaries = self.client.list_job_binaries()
+ self.assertEqual(200, resp.status)
+ binaries_info = [(binary['id'], binary['name']) for binary in binaries]
+ self.assertIn(binary_info, binaries_info)
+
+ @test.attr(type='smoke')
+ def test_internal_db_job_binary_get(self):
+ binary_id, binary_name = self._create_job_binary(
+ self.internal_db_job_binary)
+
+ # check job binary fetch by id
+ resp, binary = self.client.get_job_binary(binary_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(binary_name, binary['name'])
+ self.assertDictContainsSubset(self.internal_db_job_binary, binary)
+
+ @test.attr(type='smoke')
+ def test_internal_db_job_binary_delete(self):
+ binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+
+ # delete the job binary by id
+ resp = self.client.delete_job_binary(binary_id)[0]
+ self.assertEqual(204, resp.status)
+
+ @test.attr(type='smoke')
+ def test_job_binary_get_data(self):
+ binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+
+ # get data of job binary by id
+ resp, data = self.client.get_job_binary_data(binary_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index dd3b576..881f69e 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -108,22 +108,22 @@
s_name = data_utils.rand_name('service-')
s_type = data_utils.rand_name('type--')
s_description = data_utils.rand_name('description-')
- resp, self.service2 =\
+ resp, service2 =\
self.service_client.create_service(s_name, s_type,
description=s_description)
- self.service_ids.append(self.service2['id'])
+ self.service_ids.append(service2['id'])
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
url2 = data_utils.rand_name('url')
interface2 = 'internal'
resp, endpoint = \
self.client.update_endpoint(endpoint_for_update['id'],
- service_id=self.service2['id'],
+ service_id=service2['id'],
interface=interface2, url=url2,
region=region2, enabled=False)
self.assertEqual(resp['status'], '200')
# Asserting if the attributes of endpoint are updated
- self.assertEqual(self.service2['id'], endpoint['service_id'])
+ self.assertEqual(service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
self.assertEqual(region2, endpoint['region'])
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 9981292..02d391b 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -142,9 +142,9 @@
def _create_image(self):
name = data_utils.rand_name('image')
- resp, image = self.os_img_client.create_image(name,
- container_format='bare',
- disk_format='raw')
+ _, image = self.os_img_client.create_image(name,
+ container_format='bare',
+ disk_format='raw')
image_id = image['id']
self.addCleanup(self.os_img_client.delete_image, image_id)
return image_id
diff --git a/tempest/api/image/v1/test_image_members.py b/tempest/api/image/v1/test_image_members.py
index 4cbb62f..f91cb69 100644
--- a/tempest/api/image/v1/test_image_members.py
+++ b/tempest/api/image/v1/test_image_members.py
@@ -22,27 +22,21 @@
@test.attr(type='gate')
def test_add_image_member(self):
image = self._create_image()
- resp = self.client.add_member(self.alt_tenant_id, image)
- self.assertEqual(204, resp.status)
- resp, body = self.client.get_image_membership(image)
- self.assertEqual(200, resp.status)
+ self.client.add_member(self.alt_tenant_id, image)
+ _, body = self.client.get_image_membership(image)
members = body['members']
members = map(lambda x: x['member_id'], members)
self.assertIn(self.alt_tenant_id, members)
# get image as alt user
- resp, body = self.alt_img_cli.get_image(image)
- self.assertEqual(200, resp.status)
+ self.alt_img_cli.get_image(image)
@test.attr(type='gate')
def test_get_shared_images(self):
image = self._create_image()
- resp = self.client.add_member(self.alt_tenant_id, image)
- self.assertEqual(204, resp.status)
+ self.client.add_member(self.alt_tenant_id, image)
share_image = self._create_image()
- resp = self.client.add_member(self.alt_tenant_id, share_image)
- self.assertEqual(204, resp.status)
- resp, body = self.client.get_shared_images(self.alt_tenant_id)
- self.assertEqual(200, resp.status)
+ self.client.add_member(self.alt_tenant_id, share_image)
+ _, body = self.client.get_shared_images(self.alt_tenant_id)
images = body['shared_images']
images = map(lambda x: x['image_id'], images)
self.assertIn(share_image, images)
@@ -51,11 +45,8 @@
@test.attr(type='gate')
def test_remove_member(self):
image_id = self._create_image()
- resp = self.client.add_member(self.alt_tenant_id, image_id)
- self.assertEqual(204, resp.status)
- resp = self.client.delete_member(self.alt_tenant_id, image_id)
- self.assertEqual(204, resp.status)
- resp, body = self.client.get_image_membership(image_id)
- self.assertEqual(200, resp.status)
+ self.client.add_member(self.alt_tenant_id, image_id)
+ self.client.delete_member(self.alt_tenant_id, image_id)
+ _, body = self.client.get_image_membership(image_id)
members = body['members']
self.assertEqual(0, len(members), str(members))
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 2cc2009..8528e42 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -30,11 +30,11 @@
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
- resp, body = self.create_image(name='New Name',
- container_format='bare',
- disk_format='raw',
- is_public=False,
- properties=properties)
+ _, body = self.create_image(name='New Name',
+ container_format='bare',
+ disk_format='raw',
+ is_public=False,
+ properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
@@ -45,19 +45,19 @@
# Now try uploading an image file
image_file = StringIO.StringIO(('*' * 1024))
- resp, body = self.client.update_image(image_id, data=image_file)
+ _, body = self.client.update_image(image_id, data=image_file)
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@test.attr(type='gate')
def test_register_remote_image(self):
# Register a new remote image
- resp, body = self.create_image(name='New Remote Image',
- container_format='bare',
- disk_format='raw', is_public=False,
- location=CONF.image.http_image,
- properties={'key1': 'value1',
- 'key2': 'value2'})
+ _, body = self.create_image(name='New Remote Image',
+ container_format='bare',
+ disk_format='raw', is_public=False,
+ location=CONF.image.http_image,
+ properties={'key1': 'value1',
+ 'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
@@ -68,28 +68,27 @@
@test.attr(type='gate')
def test_register_http_image(self):
- resp, body = self.create_image(name='New Http Image',
- container_format='bare',
- disk_format='raw', is_public=False,
- copy_from=CONF.image.http_image)
+ _, body = self.create_image(name='New Http Image',
+ container_format='bare',
+ disk_format='raw', is_public=False,
+ copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
- resp, body = self.client.get_image(image_id)
- self.assertEqual(resp['status'], '200')
+ self.client.get_image(image_id)
@test.attr(type='gate')
def test_register_image_with_min_ram(self):
# Register an image with min ram
properties = {'prop1': 'val1'}
- resp, body = self.create_image(name='New_image_with_min_ram',
- container_format='bare',
- disk_format='raw',
- is_public=False,
- min_ram=40,
- properties=properties)
+ _, body = self.create_image(name='New_image_with_min_ram',
+ container_format='bare',
+ disk_format='raw',
+ is_public=False,
+ min_ram=40,
+ properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
@@ -97,8 +96,7 @@
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
- resp, body = self.client.delete_image(body['id'])
- self.assertEqual('200', resp['status'])
+ self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
@@ -143,11 +141,11 @@
"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
- resp, image = cls.create_image(name=name,
- container_format=container_format,
- disk_format=disk_format,
- is_public=False,
- location=location)
+ _, image = cls.create_image(name=name,
+ container_format=container_format,
+ disk_format=disk_format,
+ is_public=False,
+ location=location)
image_id = image['id']
return image_id
@@ -161,26 +159,24 @@
"""
image_file = StringIO.StringIO('*' * size)
name = 'New Standard Image %s' % name
- resp, image = cls.create_image(name=name,
- container_format=container_format,
- disk_format=disk_format,
- is_public=False, data=image_file)
+ _, image = cls.create_image(name=name,
+ container_format=container_format,
+ disk_format=disk_format,
+ is_public=False, data=image_file)
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_index_no_params(self):
# Simple test to see all fixture images returned
- resp, images_list = self.client.image_list()
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list()
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@test.attr(type='gate')
def test_index_disk_format(self):
- resp, images_list = self.client.image_list(disk_format='ami')
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(disk_format='ami')
for image in images_list:
self.assertEqual(image['disk_format'], 'ami')
result_set = set(map(lambda x: x['id'], images_list))
@@ -189,8 +185,7 @@
@test.attr(type='gate')
def test_index_container_format(self):
- resp, images_list = self.client.image_list(container_format='bare')
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(container_format='bare')
for image in images_list:
self.assertEqual(image['container_format'], 'bare')
result_set = set(map(lambda x: x['id'], images_list))
@@ -199,8 +194,7 @@
@test.attr(type='gate')
def test_index_max_size(self):
- resp, images_list = self.client.image_list(size_max=42)
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(size_max=42)
for image in images_list:
self.assertTrue(image['size'] <= 42)
result_set = set(map(lambda x: x['id'], images_list))
@@ -209,8 +203,7 @@
@test.attr(type='gate')
def test_index_min_size(self):
- resp, images_list = self.client.image_list(size_min=142)
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(size_min=142)
for image in images_list:
self.assertTrue(image['size'] >= 142)
result_set = set(map(lambda x: x['id'], images_list))
@@ -219,10 +212,9 @@
@test.attr(type='gate')
def test_index_status_active_detail(self):
- resp, images_list = self.client.image_list_detail(status='active',
- sort_key='size',
- sort_dir='desc')
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list_detail(status='active',
+ sort_key='size',
+ sort_dir='desc')
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
@@ -232,9 +224,8 @@
@test.attr(type='gate')
def test_index_name(self):
- resp, images_list = self.client.image_list_detail(
+ _, images_list = self.client.image_list_detail(
name='New Remote Image dup')
- self.assertEqual(resp['status'], '200')
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
@@ -266,10 +257,10 @@
cls.snapshot_set = set((cls.snapshot,))
image_file = StringIO.StringIO('*' * 42)
- resp, image = cls.create_image(name="Standard Image",
- container_format='ami',
- disk_format='ami',
- is_public=False, data=image_file)
+ _, image = cls.create_image(name="Standard Image",
+ container_format='ami',
+ disk_format='ami',
+ is_public=False, data=image_file)
cls.image_id = image['id']
cls.client.wait_for_image_status(image['id'], 'active')
@@ -281,7 +272,7 @@
@classmethod
def _create_snapshot(cls, name, image_id, flavor, **kwargs):
- resp, server = cls.servers_client.create_server(
+ _, server = cls.servers_client.create_server(
name, image_id, flavor, **kwargs)
cls.servers.append(server)
cls.servers_client.wait_for_server_status(
@@ -297,9 +288,8 @@
@test.services('compute')
def test_index_server_id(self):
# The images should contain images filtered by server id
- resp, images = self.client.image_list_detail(
+ _, images = self.client.image_list_detail(
{'instance_uuid': self.servers[0]['id']})
- self.assertEqual(200, resp.status)
result_set = set(map(lambda x: x['id'], images))
self.assertEqual(self.snapshot_set, result_set)
@@ -308,9 +298,8 @@
def test_index_type(self):
# The list of servers should be filtered by image type
params = {'image_type': 'snapshot'}
- resp, images = self.client.image_list_detail(params)
+ _, images = self.client.image_list_detail(params)
- self.assertEqual(200, resp.status)
result_set = set(map(lambda x: x['id'], images))
self.assertIn(self.snapshot, result_set)
@@ -318,9 +307,8 @@
@test.services('compute')
def test_index_limit(self):
# Verify only the expected number of results are returned
- resp, images = self.client.image_list_detail(limit=1)
+ _, images = self.client.image_list_detail(limit=1)
- self.assertEqual(200, resp.status)
self.assertEqual(1, len(images))
@test.attr(type='gate')
@@ -329,13 +317,11 @@
# Verify an update image is returned
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
- resp, image = self.client.get_image_meta(self.snapshot)
- self.assertEqual(200, resp.status)
+ _, image = self.client.get_image_meta(self.snapshot)
self.assertEqual(self.snapshot, image['id'])
- resp, images = self.client.image_list_detail(
+ _, images = self.client.image_list_detail(
changes_since=image['updated_at'])
- self.assertEqual(200, resp.status)
result_set = set(map(lambda x: x['id'], images))
self.assertIn(self.image_id, result_set)
self.assertNotIn(self.snapshot, result_set)
@@ -357,18 +343,18 @@
"""
image_file = StringIO.StringIO('*' * size)
name = 'New Standard Image %s' % name
- resp, image = cls.create_image(name=name,
- container_format=container_format,
- disk_format=disk_format,
- is_public=False, data=image_file,
- properties={'key1': 'value1'})
+ _, image = cls.create_image(name=name,
+ container_format=container_format,
+ disk_format=disk_format,
+ is_public=False, data=image_file,
+ properties={'key1': 'value1'})
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_list_image_metadata(self):
# All metadata key/value pairs for an image should be returned
- resp, resp_metadata = self.client.get_image_meta(self.image_id)
+ _, resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'value1'}
self.assertEqual(expected, resp_metadata['properties'])
@@ -376,13 +362,12 @@
def test_update_image_metadata(self):
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
- resp, metadata = self.client.get_image_meta(self.image_id)
- self.assertEqual(200, resp.status)
+ _, metadata = self.client.get_image_meta(self.image_id)
self.assertEqual(metadata['properties'], {'key1': 'value1'})
metadata['properties'].update(req_metadata)
- resp, metadata = self.client.update_image(
+ _, metadata = self.client.update_image(
self.image_id, properties=metadata['properties'])
- resp, resp_metadata = self.client.get_image_meta(self.image_id)
+ _, resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'alt1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata['properties'])
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 37dc163..ae777eb 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -37,11 +37,11 @@
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
- resp, body = self.create_image(name=image_name,
- container_format='bare',
- disk_format='raw',
- visibility='private',
- ramdisk_id=uuid)
+ _, body = self.create_image(name=image_name,
+ container_format='bare',
+ disk_format='raw',
+ visibility='private',
+ ramdisk_id=uuid)
self.assertIn('id', body)
image_id = body.get('id')
self.assertIn('name', body)
@@ -54,12 +54,10 @@
# Now try uploading an image file
file_content = '*' * 1024
image_file = StringIO.StringIO(file_content)
- resp, body = self.client.store_image(image_id, image_file)
- self.assertEqual(resp.status, 204)
+ self.client.store_image(image_id, image_file)
# Now try to get image details
- resp, body = self.client.get_image(image_id)
- self.assertEqual(200, resp.status)
+ _, body = self.client.get_image(image_id)
self.assertEqual(image_id, body['id'])
self.assertEqual(image_name, body['name'])
self.assertEqual(uuid, body['ramdisk_id'])
@@ -67,8 +65,7 @@
self.assertEqual(1024, body.get('size'))
# Now try get image file
- resp, body = self.client.get_image_file(image_id)
- self.assertEqual(200, resp.status)
+ _, body = self.client.get_image_file(image_id)
self.assertEqual(file_content, body)
@test.attr(type='gate')
@@ -77,11 +74,10 @@
# Create image
image_name = data_utils.rand_name('image')
- resp, body = self.client.create_image(name=image_name,
- container_format='bare',
- disk_format='raw',
- visibility='private')
- self.assertEqual(201, resp.status)
+ _, body = self.client.create_image(name=image_name,
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
image_id = body['id']
# Delete Image
@@ -89,8 +85,7 @@
self.client.wait_for_resource_deletion(image_id)
# Verifying deletion
- resp, images = self.client.image_list()
- self.assertEqual(resp.status, 200)
+ _, images = self.client.image_list()
self.assertNotIn(image_id, images)
@test.attr(type='gate')
@@ -99,11 +94,10 @@
# Create image
image_name = data_utils.rand_name('image')
- resp, body = self.client.create_image(name=image_name,
- container_format='bare',
- disk_format='iso',
- visibility='private')
- self.assertEqual(201, resp.status)
+ _, body = self.client.create_image(name=image_name,
+ container_format='bare',
+ disk_format='iso',
+ visibility='private')
self.addCleanup(self.client.delete_image, body['id'])
self.assertEqual('queued', body['status'])
image_id = body['id']
@@ -111,20 +105,16 @@
# Now try uploading an image file
file_content = '*' * 1024
image_file = StringIO.StringIO(file_content)
- resp, body = self.client.store_image(image_id, image_file)
- self.assertEqual(204, resp.status)
+ self.client.store_image(image_id, image_file)
# Update Image
new_image_name = data_utils.rand_name('new-image')
- resp, body = self.client.update_image(image_id, [
+ _, body = self.client.update_image(image_id, [
dict(replace='/name', value=new_image_name)])
- self.assertEqual(200, resp.status)
-
# Verifying updating
- resp, body = self.client.get_image(image_id)
- self.assertEqual(200, resp.status)
+ _, body = self.client.get_image(image_id)
self.assertEqual(image_id, body['id'])
self.assertEqual(new_image_name, body['name'])
@@ -157,12 +147,12 @@
"""
image_file = StringIO.StringIO('*' * random.randint(1024, 4096))
name = data_utils.rand_name('image-')
- resp, body = cls.create_image(name=name,
- container_format=container_format,
- disk_format=disk_format,
- visibility='private')
+ _, body = cls.create_image(name=name,
+ container_format=container_format,
+ disk_format=disk_format,
+ visibility='private')
image_id = body['id']
- resp, body = cls.client.store_image(image_id, data=image_file)
+ cls.client.store_image(image_id, data=image_file)
return image_id
@@ -170,8 +160,7 @@
"""
Perform list action with given params and validates result.
"""
- resp, images_list = self.client.image_list(params=params)
- self.assertEqual(200, resp.status)
+ _, images_list = self.client.image_list(params=params)
# Validating params of fetched images
for image in images_list:
for key in params:
@@ -181,8 +170,7 @@
@test.attr(type='gate')
def test_index_no_params(self):
# Simple test to see all fixture images returned
- resp, images_list = self.client.image_list()
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list()
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
@@ -211,8 +199,7 @@
# Test to get all images by size
image_id = self.created_images[1]
# Get image metadata
- resp, image = self.client.get_image(image_id)
- self.assertEqual(resp['status'], '200')
+ _, image = self.client.get_image(image_id)
params = {"size": image['size']}
self._list_by_param_value_and_assert(params)
@@ -222,13 +209,11 @@
# Test to get all images with size between 2000 to 3000
image_id = self.created_images[1]
# Get image metadata
- resp, image = self.client.get_image(image_id)
- self.assertEqual(resp['status'], '200')
+ _, image = self.client.get_image(image_id)
size = image['size']
params = {"size_min": size - 500, "size_max": size + 500}
- resp, images_list = self.client.image_list(params=params)
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(params=params)
image_size_list = map(lambda x: x['size'], images_list)
for image_size in image_size_list:
@@ -246,8 +231,7 @@
def test_list_images_param_limit(self):
# Test to get images by limit
params = {"limit": 2}
- resp, images_list = self.client.image_list(params=params)
- self.assertEqual(resp['status'], '200')
+ _, images_list = self.client.image_list(params=params)
self.assertEqual(len(images_list), params['limit'],
"Failed to get images by limit")
@@ -256,14 +240,12 @@
def test_get_image_schema(self):
# Test to get image schema
schema = "image"
- resp, body = self.client.get_schema(schema)
- self.assertEqual(200, resp.status)
+ _, body = self.client.get_schema(schema)
self.assertEqual("image", body['name'])
@test.attr(type='gate')
def test_get_images_schema(self):
# Test to get images schema
schema = "images"
- resp, body = self.client.get_schema(schema)
- self.assertEqual(200, resp.status)
+ _, body = self.client.get_schema(schema)
self.assertEqual("images", body['name'])
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index f80c818..5aaf578 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -20,9 +20,8 @@
@test.attr(type='gate')
def test_image_share_accept(self):
image_id = self._create_image()
- resp, member = self.os_img_client.add_member(image_id,
- self.alt_tenant_id)
- self.assertEqual(200, resp.status)
+ _, member = self.os_img_client.add_member(image_id,
+ self.alt_tenant_id)
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'pending')
@@ -31,8 +30,7 @@
self.alt_tenant_id,
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
- resp, body = self.os_img_client.get_image_membership(image_id)
- self.assertEqual(200, resp.status)
+ _, body = self.os_img_client.get_image_membership(image_id)
members = body['members']
member = members[0]
self.assertEqual(len(members), 1, str(members))
@@ -43,17 +41,15 @@
@test.attr(type='gate')
def test_image_share_reject(self):
image_id = self._create_image()
- resp, member = self.os_img_client.add_member(image_id,
- self.alt_tenant_id)
- self.assertEqual(200, resp.status)
+ _, member = self.os_img_client.add_member(image_id,
+ self.alt_tenant_id)
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'pending')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
- resp, _ = self.alt_img_client.update_member_status(image_id,
- self.alt_tenant_id,
- 'rejected')
- self.assertEqual(200, resp.status)
+ self.alt_img_client.update_member_status(image_id,
+ self.alt_tenant_id,
+ 'rejected')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
@test.attr(type='gate')
@@ -66,9 +62,8 @@
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
- resp, member = self.os_img_client.get_member(image_id,
- self.alt_tenant_id)
- self.assertEqual(200, resp.status)
+ _, member = self.os_img_client.get_member(image_id,
+ self.alt_tenant_id)
self.assertEqual(self.alt_tenant_id, member['member_id'])
self.assertEqual(image_id, member['image_id'])
self.assertEqual('accepted', member['status'])
@@ -83,18 +78,15 @@
'accepted')
self.assertIn(image_id, self._list_image_ids_as_alt())
- resp = self.os_img_client.remove_member(image_id, self.alt_tenant_id)
- self.assertEqual(204, resp.status)
+ self.os_img_client.remove_member(image_id, self.alt_tenant_id)
self.assertNotIn(image_id, self._list_image_ids_as_alt())
@test.attr(type='gate')
def test_get_image_member_schema(self):
- resp, body = self.os_img_client.get_schema("member")
- self.assertEqual(200, resp.status)
+ _, body = self.os_img_client.get_schema("member")
self.assertEqual("member", body['name'])
@test.attr(type='gate')
def test_get_image_members_schema(self):
- resp, body = self.os_img_client.get_schema("members")
- self.assertEqual(200, resp.status)
+ _, body = self.os_img_client.get_schema("members")
self.assertEqual("members", body['name'])
diff --git a/tempest/api/image/v2/test_images_member_negative.py b/tempest/api/image/v2/test_images_member_negative.py
index 98ef649..7da6e65 100644
--- a/tempest/api/image/v2/test_images_member_negative.py
+++ b/tempest/api/image/v2/test_images_member_negative.py
@@ -21,8 +21,8 @@
@test.attr(type=['negative', 'gate'])
def test_image_share_invalid_status(self):
image_id = self._create_image()
- resp, member = self.os_img_client.add_member(image_id,
- self.alt_tenant_id)
+ _, member = self.os_img_client.add_member(image_id,
+ self.alt_tenant_id)
self.assertEqual(member['status'], 'pending')
self.assertRaises(exceptions.BadRequest,
self.alt_img_client.update_member_status,
@@ -31,8 +31,8 @@
@test.attr(type=['negative', 'gate'])
def test_image_share_owner_cannot_accept(self):
image_id = self._create_image()
- resp, member = self.os_img_client.add_member(image_id,
- self.alt_tenant_id)
+ _, member = self.os_img_client.add_member(image_id,
+ self.alt_tenant_id)
self.assertEqual(member['status'], 'pending')
self.assertNotIn(image_id, self._list_image_ids_as_alt())
self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index 27ba39c..722929e 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -52,11 +52,10 @@
def test_get_delete_deleted_image(self):
# get and delete the deleted image
# create and delete image
- resp, body = self.client.create_image(name='test',
- container_format='bare',
- disk_format='raw')
+ _, body = self.client.create_image(name='test',
+ container_format='bare',
+ disk_format='raw')
image_id = body['id']
- self.assertEqual(201, resp.status)
self.client.delete_image(image_id)
self.client.wait_for_resource_deletion(image_id)
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index dec3353..a9db24b 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -21,23 +21,19 @@
@test.attr(type='gate')
def test_update_delete_tags_for_image(self):
- resp, body = self.create_image(container_format='bare',
- disk_format='raw',
- visibility='private')
+ _, body = self.create_image(container_format='bare',
+ disk_format='raw',
+ visibility='private')
image_id = body['id']
tag = data_utils.rand_name('tag-')
self.addCleanup(self.client.delete_image, image_id)
# Creating image tag and verify it.
- resp, body = self.client.add_image_tag(image_id, tag)
- self.assertEqual(resp.status, 204)
- resp, body = self.client.get_image(image_id)
- self.assertEqual(resp.status, 200)
+ self.client.add_image_tag(image_id, tag)
+ _, body = self.client.get_image(image_id)
self.assertIn(tag, body['tags'])
# Deleting image tag and verify it.
- resp = self.client.delete_image_tag(image_id, tag)
- self.assertEqual(resp.status, 204)
- resp, body = self.client.get_image(image_id)
- self.assertEqual(resp.status, 200)
+ self.client.delete_image_tag(image_id, tag)
+ _, body = self.client.get_image(image_id)
self.assertNotIn(tag, body['tags'])
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 13cfa0a..8e42b7c 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -33,10 +33,10 @@
@test.attr(type=['negative', 'gate'])
def test_delete_non_existing_tag(self):
# Delete non existing tag.
- resp, body = self.create_image(container_format='bare',
- disk_format='raw',
- visibility='private'
- )
+ _, body = self.create_image(container_format='bare',
+ disk_format='raw',
+ visibility='private'
+ )
image_id = body['id']
tag = data_utils.rand_name('non-exist-tag-')
self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index a307986..d1a8faf 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -85,3 +85,50 @@
self.assertEqual('200', resp['status'])
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+
+ @test.requires_ext(extension='lbaas', service='network')
+ @test.attr(type='gate')
+ def test_lbaas_quotas(self):
+ # Add a tenant to conduct the test
+ test_tenant = data_utils.rand_name('test_tenant_')
+ test_description = data_utils.rand_name('desc_')
+ _, tenant = self.identity_admin_client.create_tenant(
+ name=test_tenant,
+ description=test_description)
+ tenant_id = tenant['id']
+ self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+ # Change lbaas quotas for tenant
+ new_quotas = {'vip': 1, 'pool': 2,
+ 'member': 3, 'health_monitor': 4}
+
+ resp, quota_set = self.admin_client.update_quotas(tenant_id,
+ **new_quotas)
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self.admin_client.reset_quotas, tenant_id)
+ self.assertEqual(1, quota_set['vip'])
+ self.assertEqual(2, quota_set['pool'])
+ self.assertEqual(3, quota_set['member'])
+ self.assertEqual(4, quota_set['health_monitor'])
+        # Confirm our tenant is listed among tenants with non-default quotas
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ found = False
+ for qs in non_default_quotas['quotas']:
+ if qs['tenant_id'] == tenant_id:
+ found = True
+ self.assertTrue(found)
+    # Confirm from API that quotas were changed as requested for tenant
+ resp, quota_set = self.admin_client.show_quotas(tenant_id)
+ quota_set = quota_set['quota']
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(1, quota_set['vip'])
+ self.assertEqual(2, quota_set['pool'])
+ self.assertEqual(3, quota_set['member'])
+ self.assertEqual(4, quota_set['health_monitor'])
+ # Reset quotas to default and confirm
+ resp, body = self.admin_client.reset_quotas(tenant_id)
+ self.assertEqual('204', resp['status'])
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ for q in non_default_quotas['quotas']:
+ self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index cc768fd..087b87a 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -149,7 +149,8 @@
return network
@classmethod
- def create_subnet(cls, network, gateway=None, cidr=None, mask_bits=None):
+ def create_subnet(cls, network, gateway=None, cidr=None, mask_bits=None,
+ **kwargs):
"""Wrapper utility that returns a test subnet."""
# The cidr and mask_bits depend on the ip version.
if cls._ip_version == 4:
@@ -168,7 +169,8 @@
network_id=network['id'],
cidr=str(subnet_cidr),
ip_version=cls._ip_version,
- gateway_ip=gateway)
+ gateway_ip=gateway,
+ **kwargs)
break
except exceptions.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index e0e26da..c897716 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -70,6 +70,27 @@
self.assertTrue(port, msg)
self._confirm_allowed_address_pair(port[0], self.ip_address)
+ @test.attr(type='smoke')
+ def test_update_port_with_address_pair(self):
+ # Create a port without allowed address pair
+ resp, body = self.client.create_port(network_id=self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port_id = body['port']['id']
+ self.addCleanup(self.client.delete_port, port_id)
+
+ # Confirm port is created
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual('200', resp['status'])
+
+ # Update allowed address pair attribute of port
+ allowed_address_pairs = [{'ip_address': self.ip_address,
+ 'mac_address': self.mac_address}]
+ resp, body = self.client.update_port(port_id,
+ allowed_address_pairs=allowed_address_pairs)
+ self.assertEqual('200', resp['status'])
+ newport = body['port']
+ self._confirm_allowed_address_pair(newport, self.ip_address)
+
def _confirm_allowed_address_pair(self, port, ip):
msg = 'Port allowed address pairs should not be empty'
self.assertTrue(port['allowed_address_pairs'], msg)
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index ac0fd11..78cb221 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -14,6 +14,7 @@
# under the License.
import netaddr
+import testtools
from tempest.api.network import base
from tempest.common.utils import data_utils
@@ -433,6 +434,29 @@
self.assertEqual('204', resp['status'])
self.subnets.pop()
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
+ "IPv6 extended attributes for subnets not "
+ "available")
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_v6_attributes(self):
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network,
+ gateway='fe80::1',
+ ipv6_ra_mode='slaac',
+ ipv6_address_mode='slaac')
+ # Verifies Subnet GW in IPv6
+ self.assertEqual(subnet['gateway_ip'], 'fe80::1')
+ self.assertEqual(subnet['ipv6_ra_mode'], 'slaac')
+ self.assertEqual(subnet['ipv6_address_mode'], 'slaac')
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
+
class NetworksIpV6TestXML(NetworksIpV6TestJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index d1fe15c..0cc3f19 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -22,19 +22,15 @@
CONF = config.CONF
-class VPNaaSTestJSON(base.BaseNetworkTest):
+class VPNaaSTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
-
- List VPN Services
- Show VPN Services
- Create VPN Services
- Update VPN Services
- Delete VPN Services
+ List, Show, Create, Delete, and Update VPN Service
List, Show, Create, Delete, and Update IKE policy
+ List, Show, Create, Delete, and Update IPSec policy
"""
@classmethod
@@ -47,11 +43,12 @@
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
- data_utils.rand_name("router-"),
+ data_utils.rand_name("router"),
external_network_id=CONF.network.public_network_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
cls.router['id'])
+
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
@@ -87,6 +84,85 @@
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
+ def _delete_vpn_service(self, vpn_service_id):
+ resp, _ = self.client.delete_vpnservice(vpn_service_id)
+ self.assertEqual('204', resp['status'])
+ # Asserting if vpn service is found in the list after deletion
+ _, body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertNotIn(vpn_service_id, vpn_services)
+
+ def _get_tenant_id(self):
+ """
+        Returns the tenant_id of the current client's user
+ """
+ # TODO(jroovers) This is a temporary workaround to get the tenant_id
+        # of the current client. Replace this once tenant_isolation for
+ # neutron is fixed.
+ _, body = self.client.show_network(self.network['id'])
+ return body['network']['tenant_id']
+
+ @test.attr(type='smoke')
+ def test_admin_create_ipsec_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+ # Create IPSec policy for the newly created tenant
+ name = data_utils.rand_name('ipsec-policy')
+ resp, body = (self.admin_client.
+ create_ipsecpolicy(name=name, tenant_id=tenant_id))
+ self.assertEqual('201', resp['status'])
+ ipsecpolicy = body['ipsecpolicy']
+ self.assertIsNotNone(ipsecpolicy['id'])
+ self.addCleanup(self.admin_client.delete_ipsecpolicy,
+ ipsecpolicy['id'])
+
+ # Assert that created ipsec policy is found in API list call
+ _, body = self.client.list_ipsecpolicies()
+ ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
+ self.assertIn(ipsecpolicy['id'], ipsecpolicies)
+
+ @test.attr(type='smoke')
+ def test_admin_create_vpn_service_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create vpn service for the newly created tenant
+ name = data_utils.rand_name('vpn-service')
+ resp, body = self.admin_client.create_vpnservice(
+ subnet_id=self.subnet['id'],
+ router_id=self.router['id'],
+ name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ self.assertEqual('201', resp['status'])
+ vpnservice = body['vpnservice']
+ self.assertIsNotNone(vpnservice['id'])
+ self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
+
+ # Assert that created vpnservice is found in API list call
+ _, body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertIn(vpnservice['id'], vpn_services)
+
+ @test.attr(type='smoke')
+ def test_admin_create_ike_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create IKE policy for the newly created tenant
+ name = data_utils.rand_name('ike-policy')
+ resp, body = (self.admin_client.
+ create_ikepolicy(name=name, ike_version="v1",
+ encryption_algorithm="aes-128",
+ auth_algorithm="sha1",
+ tenant_id=tenant_id))
+ self.assertEqual('201', resp['status'])
+ ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
+ self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
+
+ # Assert that created ike policy is found in API list call
+ _, body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertIn(ikepolicy['id'], ikepolicies)
+
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
@@ -97,14 +173,15 @@
@test.attr(type='smoke')
def test_create_update_delete_vpn_service(self):
- # Creates a VPN service
- name = data_utils.rand_name('vpn-service-')
+ # Creates a VPN service and sets up deletion
+ name = data_utils.rand_name('vpn-service')
resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
router_id=self.router['id'],
name=name,
admin_state_up=True)
self.assertEqual('201', resp['status'])
vpnservice = body['vpnservice']
+ self.addCleanup(self._delete_vpn_service, vpnservice['id'])
# Assert if created vpnservices are not found in vpnservices list
resp, body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
@@ -116,14 +193,6 @@
# But precondition is that current state of vpnservice
# should be "ACTIVE" not "PENDING*"
- # Verification of vpn service delete
- resp, body = self.client.delete_vpnservice(vpnservice['id'])
- self.assertEqual('204', resp['status'])
- # Asserting if vpn service is found in the list after deletion
- resp, body = self.client.list_vpnservices()
- vpn_services = [vs['id'] for vs in body['vpnservices']]
- self.assertNotIn(vpnservice['id'], vpn_services)
-
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
@@ -137,6 +206,9 @@
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
+ valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
+ "PENDING_UPDATE", "PENDING_DELETE"]
+ self.assertIn(vpnservice['status'], valid_status)
@test.attr(type='smoke')
def test_list_ike_policies(self):
@@ -149,7 +221,7 @@
@test.attr(type='smoke')
def test_create_update_delete_ike_policy(self):
# Creates a IKE policy
- name = data_utils.rand_name('ike-policy-')
+ name = data_utils.rand_name('ike-policy')
resp, body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
@@ -157,19 +229,31 @@
auth_algorithm="sha1"))
self.assertEqual('201', resp['status'])
ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
- # Verification of ike policy update
- description = "Updated ike policy"
- new_ike = {'description': description, 'pfs': 'group5',
- 'name': data_utils.rand_name("New-IKE-")}
- resp, body = self.client.update_ikepolicy(ikepolicy['id'],
- **new_ike)
+
+ # Update IKE Policy
+ new_ike = {'name': data_utils.rand_name("New-IKE"),
+ 'description': "Updated ike policy",
+ 'encryption_algorithm': "aes-256",
+ 'ike_version': "v2",
+ 'pfs': "group14",
+ 'lifetime': {'units': "seconds", 'value': 2000}}
+ resp, _ = self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
self.assertEqual('200', resp['status'])
- updated_ike_policy = body['ikepolicy']
- self.assertEqual(updated_ike_policy['description'], description)
+ # Confirm that update was successful by verifying using 'show'
+ _, body = self.client.show_ikepolicy(ikepolicy['id'])
+ ike_policy = body['ikepolicy']
+ for key, value in new_ike.iteritems():
+ self.assertIn(key, ike_policy)
+ self.assertEqual(value, ike_policy[key])
+
# Verification of ike policy delete
- resp, body = self.client.delete_ikepolicy(ikepolicy['id'])
+ resp, _ = self.client.delete_ikepolicy(ikepolicy['id'])
self.assertEqual('204', resp['status'])
+ _, body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertNotIn(ike_policy['id'], ikepolicies)
@test.attr(type='smoke')
def test_show_ike_policy(self):
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 6b18182..ccc0067 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -44,9 +44,6 @@
cls.isolated_creds.get_admin_creds())
# Get isolated creds for alt user
cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
- # Add isolated users to operator role so that they can create a
- # container in swift.
- cls._assign_member_role()
else:
cls.os = clients.Manager()
cls.os_admin = clients.AdminManager()
@@ -80,22 +77,6 @@
super(BaseObjectTest, cls).tearDownClass()
@classmethod
- def _assign_member_role(cls):
- primary_creds = cls.isolated_creds.get_primary_creds()
- alt_creds = cls.isolated_creds.get_alt_creds()
- swift_role = CONF.object_storage.operator_role
- try:
- resp, roles = cls.os_admin.identity_client.list_roles()
- role = next(r for r in roles if r['name'] == swift_role)
- except StopIteration:
- msg = "No role named %s found" % swift_role
- raise exceptions.NotFound(msg)
- for creds in [primary_creds, alt_creds]:
- cls.os_admin.identity_client.assign_user_role(creds.tenant_id,
- creds.user_id,
- role['id'])
-
- @classmethod
def delete_containers(cls, containers, container_client=None,
object_client=None):
"""Remove given containers and all objects in them.
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 53ca20d..73b4f3b 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -54,14 +54,18 @@
self.assertEqual(resp['status'], '200')
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-delete-at', resp)
+ # we want to ensure that we will sleep long enough for things to
+ # actually expire, so figure out how many secs in the future that is.
+ sleepy_time = int(resp['x-delete-at']) - int(time.time())
+
resp, body = self.object_client.get_object(self.container_name,
self.object_name)
self.assertEqual(resp['status'], '200')
self.assertHeaders(resp, 'Object', 'GET')
self.assertIn('x-delete-at', resp)
- # sleep for over 5 seconds, so that object expires
- time.sleep(5)
+ # add a couple of seconds for safety.
+ time.sleep(sleepy_time + 3)
# object should not be there anymore
self.assertRaises(exceptions.NotFound, self.object_client.get_object,
@@ -69,10 +73,12 @@
@test.attr(type='gate')
def test_get_object_after_expiry_time(self):
- metadata = {'X-Delete-After': '3'}
+        # the 10s is important, because the get calls can sometimes
+        # take 3s each
+ metadata = {'X-Delete-After': '10'}
self._test_object_expiry(metadata)
@test.attr(type='gate')
def test_get_object_at_expiry_time(self):
- metadata = {'X-Delete-At': str(int(time.time()) + 3)}
+ metadata = {'X-Delete-At': str(int(time.time()) + 10)}
self._test_object_expiry(metadata)
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 06e63a4..1ef9aa1 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -13,11 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import cStringIO as StringIO
import hashlib
import random
import re
from six import moves
import time
+import zlib
from tempest.api.object_storage import base
from tempest.common import custom_matchers
@@ -61,7 +63,7 @@
return object_name, data_segments
- @test.attr(type='smoke')
+ @test.attr(type='gate')
def test_create_object(self):
# create object
object_name = data_utils.rand_name(name='TestObject')
@@ -76,7 +78,242 @@
self.assertEqual(resp['status'], '201')
self.assertHeaders(resp, 'Object', 'PUT')
- @test.attr(type='smoke')
+ # check uploaded content
+ _, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_content_disposition(self):
+ # create object with content_disposition
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {}
+ metadata['content-disposition'] = 'inline'
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=None)
+ self.assertIn('content-disposition', resp)
+ self.assertEqual(resp['content-disposition'], 'inline')
+ self.assertEqual(body, data)
+
+ @test.attr(type='gate')
+ def test_create_object_with_content_encoding(self):
+ # create object with content_encoding
+ object_name = data_utils.rand_name(name='TestObject')
+
+ # put compressed string
+ data_before = 'x' * 2000
+ data = zlib.compress(data_before)
+ metadata = {}
+ metadata['content-encoding'] = 'deflate'
+
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ # download compressed object
+ metadata = {}
+ metadata['accept-encoding'] = 'deflate'
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=metadata)
+ self.assertEqual(body, data_before)
+
+ @test.attr(type='gate')
+ def test_create_object_with_etag(self):
+ # create object with etag
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ md5 = hashlib.md5(data).hexdigest()
+ metadata = {'Etag': md5}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ # check uploaded content
+ _, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_expect_continue(self):
+ # create object with expect_continue
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'Expect': '100-continue'}
+ resp = self.custom_object_client.create_object_continue(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+
+ self.assertIn('status', resp)
+ self.assertEqual(resp['status'], '100')
+
+ self.custom_object_client.create_object_continue(
+ self.container_name,
+ object_name,
+ data,
+ metadata=None)
+
+ # check uploaded content
+ _, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_transfer_encoding(self):
+ # create object with transfer_encoding
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string(1024)
+ status, _, resp_headers = self.object_client.put_object_with_chunk(
+ container=self.container_name,
+ name=object_name,
+ contents=StringIO.StringIO(data),
+ chunk_size=512)
+ self.assertEqual(status, 201)
+ self.assertHeaders(resp_headers, 'Object', 'PUT')
+
+ # check uploaded content
+ _, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_x_fresh_metadata(self):
+ # create object with x_fresh_metadata
+ object_name_base = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name_base,
+ data,
+ metadata=metadata_1)
+ object_name = data_utils.rand_name(name='TestObject')
+ metadata_2 = {'X-Copy-From': '%s/%s' % (self.container_name,
+ object_name_base),
+ 'X-Fresh-Metadata': 'true'}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ '',
+ metadata=metadata_2)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta', resp)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_x_object_meta(self):
+ # create object with object_meta
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_x_object_metakey(self):
+ # create object with the blank value of metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'X-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], '')
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_x_remove_object_meta(self):
+ # create object with x_remove_object_meta
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=metadata_add)
+ metadata_remove = {'X-Remove-Object-Meta-test-meta': 'Meta'}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata_remove)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta', resp)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
+ def test_create_object_with_x_remove_object_metakey(self):
+ # create object with the blank value of remove metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=metadata_add)
+ metadata_remove = {'X-Remove-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data,
+ metadata=metadata_remove)
+ self.assertEqual(resp['status'], '201')
+ self.assertHeaders(resp, 'Object', 'PUT')
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta', resp)
+ self.assertEqual(data, body)
+
+ @test.attr(type='gate')
def test_delete_object(self):
# create object
object_name = data_utils.rand_name(name='TestObject')
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index 5649619..f4ff7f1 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
@@ -89,3 +90,80 @@
"""Wrapper utility that sets the metadata of a queue."""
resp, body = cls.client.set_queue_metadata(queue_name, rbody)
return resp, body
+
+ @classmethod
+ def post_messages(cls, queue_name, rbody):
+ '''Wrapper utility that posts messages to a queue.'''
+ resp, body = cls.client.post_messages(queue_name, rbody)
+
+ return resp, body
+
+ @classmethod
+ def list_messages(cls, queue_name):
+ '''Wrapper utility that lists the messages in a queue.'''
+ resp, body = cls.client.list_messages(queue_name)
+
+ return resp, body
+
+ @classmethod
+ def get_single_message(cls, message_uri):
+ '''Wrapper utility that gets a single message.'''
+ resp, body = cls.client.get_single_message(message_uri)
+
+ return resp, body
+
+ @classmethod
+ def get_multiple_messages(cls, message_uri):
+ '''Wrapper utility that gets multiple messages.'''
+ resp, body = cls.client.get_multiple_messages(message_uri)
+
+ return resp, body
+
+ @classmethod
+ def delete_messages(cls, message_uri):
+ '''Wrapper utility that deletes messages.'''
+ resp, body = cls.client.delete_messages(message_uri)
+
+ return resp, body
+
+ @classmethod
+ def post_claims(cls, queue_name, rbody, url_params=False):
+ '''Wrapper utility that claims messages.'''
+        resp, body = cls.client.post_claims(
+            queue_name, rbody, url_params=url_params)
+
+ return resp, body
+
+ @classmethod
+ def query_claim(cls, claim_uri):
+ '''Wrapper utility that gets a claim.'''
+ resp, body = cls.client.query_claim(claim_uri)
+
+ return resp, body
+
+ @classmethod
+ def update_claim(cls, claim_uri, rbody):
+ '''Wrapper utility that updates a claim.'''
+ resp, body = cls.client.update_claim(claim_uri, rbody)
+
+ return resp, body
+
+ @classmethod
+ def release_claim(cls, claim_uri):
+ '''Wrapper utility that deletes a claim.'''
+ resp, body = cls.client.release_claim(claim_uri)
+
+ return resp, body
+
+ @classmethod
+ def generate_message_body(cls, repeat=1):
+        '''Wrapper utility that generates a message body.'''
+ message_ttl = data_utils.rand_int_id(start=60,
+ end=CONF.queuing.max_message_ttl)
+
+ key = data_utils.arbitrary_string(size=20, base_text='QueuingKey')
+ value = data_utils.arbitrary_string(size=20, base_text='QueuingValue')
+ message_body = {key: value}
+
+ rbody = ([{'body': message_body, 'ttl': message_ttl}] * repeat)
+ return rbody
diff --git a/tempest/api/queuing/test_claims.py b/tempest/api/queuing/test_claims.py
new file mode 100644
index 0000000..a306623
--- /dev/null
+++ b/tempest/api/queuing/test_claims.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import urlparse
+
+from tempest.api.queuing import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class TestClaims(base.BaseQueuingTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClaims, cls).setUpClass()
+ cls.queue_name = data_utils.rand_name('Queues-Test')
+ # Create Queue
+ cls.create_queue(cls.queue_name)
+
+ def _post_and_claim_messages(self, queue_name, repeat=1):
+ # Post Messages
+ message_body = self.generate_message_body(repeat=repeat)
+ self.client.post_messages(queue_name=self.queue_name,
+ rbody=message_body)
+
+ # Post Claim
+ claim_ttl = data_utils.rand_int_id(start=60,
+ end=CONF.queuing.max_claim_ttl)
+ claim_grace = data_utils.rand_int_id(start=60,
+ end=CONF.queuing.max_claim_grace)
+ claim_body = {"ttl": claim_ttl, "grace": claim_grace}
+ resp, body = self.client.post_claims(queue_name=self.queue_name,
+ rbody=claim_body)
+
+ return resp, body
+
+ @test.attr(type='smoke')
+ def test_post_claim(self):
+ _, body = self._post_and_claim_messages(queue_name=self.queue_name)
+ claimed_message_uri = body[0]['href']
+
+ # Skipping this step till bug-1331517 is fixed
+ # Get posted claim
+ # self.client.query_claim(claimed_message_uri)
+
+ # Delete Claimed message
+ self.client.delete_messages(claimed_message_uri)
+
+ @test.skip_because(bug="1331517")
+ @test.attr(type='smoke')
+ def test_query_claim(self):
+ # Post a Claim
+ resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
+
+ # Query Claim
+ claim_uri = resp['location']
+ self.client.query_claim(claim_uri)
+
+ # Delete Claimed message
+ claimed_message_uri = body[0]['href']
+ self.delete_messages(claimed_message_uri)
+
+ @test.skip_because(bug="1328111")
+ @test.attr(type='smoke')
+ def test_update_claim(self):
+ # Post a Claim
+ resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
+
+ claim_uri = resp['location']
+ claimed_message_uri = body[0]['href']
+
+ # Update Claim
+ claim_ttl = data_utils.rand_int_id(start=60,
+ end=CONF.queuing.max_claim_ttl)
+ update_rbody = {"ttl": claim_ttl}
+
+ self.client.update_claim(claim_uri, rbody=update_rbody)
+
+ # Verify claim ttl >= updated ttl value
+ _, body = self.client.query_claim(claim_uri)
+ updated_claim_ttl = body["ttl"]
+ self.assertTrue(updated_claim_ttl >= claim_ttl)
+
+ # Delete Claimed message
+ self.client.delete_messages(claimed_message_uri)
+
+ @test.attr(type='smoke')
+ def test_release_claim(self):
+ # Post a Claim
+ resp, body = self._post_and_claim_messages(queue_name=self.queue_name)
+ claim_uri = resp['location']
+
+ # Release Claim
+ self.client.release_claim(claim_uri)
+
+ # Delete Claimed message
+ # This will implicitly verify that the claim is deleted.
+ message_uri = urlparse.urlparse(claim_uri).path
+ self.client.delete_messages(message_uri)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.delete_queue(cls.queue_name)
+ super(TestClaims, cls).tearDownClass()
diff --git a/tempest/api/queuing/test_messages.py b/tempest/api/queuing/test_messages.py
new file mode 100644
index 0000000..9546c91
--- /dev/null
+++ b/tempest/api/queuing/test_messages.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from tempest.api.queuing import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class TestMessages(base.BaseQueuingTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMessages, cls).setUpClass()
+ cls.queue_name = data_utils.rand_name('Queues-Test')
+ # Create Queue
+ cls.client.create_queue(cls.queue_name)
+
+ def _post_messages(self, repeat=CONF.queuing.max_messages_per_page):
+ message_body = self.generate_message_body(repeat=repeat)
+ resp, body = self.post_messages(queue_name=self.queue_name,
+ rbody=message_body)
+ return resp, body
+
+ @test.attr(type='smoke')
+ def test_post_messages(self):
+ # Post Messages
+ resp, _ = self._post_messages()
+
+ # Get on the posted messages
+ message_uri = resp['location']
+ resp, _ = self.client.get_multiple_messages(message_uri)
+ # The test has an assertion here, because the response cannot be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('200', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_list_messages(self):
+ # Post Messages
+ self._post_messages()
+
+ # List Messages
+ resp, _ = self.list_messages(queue_name=self.queue_name)
+ # The test has an assertion here, because the response cannot be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('200', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_get_message(self):
+ # Post Messages
+ _, body = self._post_messages()
+ message_uri = body['resources'][0]
+
+ # Get posted message
+ resp, _ = self.client.get_single_message(message_uri)
+ # The test has an assertion here, because the response cannot be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('200', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_get_multiple_messages(self):
+ # Post Messages
+ resp, _ = self._post_messages()
+ message_uri = resp['location']
+
+ # Get posted messages
+ resp, _ = self.client.get_multiple_messages(message_uri)
+ # The test has an assertion here, because the response cannot be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('200', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_delete_single_message(self):
+ # Post Messages
+ _, body = self._post_messages()
+ message_uri = body['resources'][0]
+
+        # Delete posted message & verify the delete operation
+ self.client.delete_messages(message_uri)
+
+ message_uri = message_uri.replace('/messages/', '/messages?ids=')
+ resp, _ = self.client.get_multiple_messages(message_uri)
+ # The test has an assertion here, because the response has to be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('204', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_delete_multiple_messages(self):
+ # Post Messages
+ resp, _ = self._post_messages()
+ message_uri = resp['location']
+
+ # Delete multiple messages
+ self.client.delete_messages(message_uri)
+ resp, _ = self.client.get_multiple_messages(message_uri)
+ # The test has an assertion here, because the response has to be 204
+ # in this case (the client allows 200 or 204 for this API call).
+ self.assertEqual('204', resp['status'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.delete_queue(cls.queue_name)
+ super(TestMessages, cls).tearDownClass()
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
index b04cf64..e5f3a8d 100644
--- a/tempest/api_schema/compute/agents.py
+++ b/tempest/api_schema/compute/agents.py
@@ -22,7 +22,7 @@
'items': {
'type': 'object',
'properties': {
- 'agent_id': {'type': ['integer', 'string']},
+ 'agent_id': {'type': 'integer'},
'hypervisor': {'type': 'string'},
'os': {'type': 'string'},
'architecture': {'type': 'string'},
@@ -38,3 +38,24 @@
'required': ['agents']
}
}
+
+common_create_agent = {
+ 'type': 'object',
+ 'properties': {
+ 'agent': {
+ 'type': 'object',
+ 'properties': {
+ 'agent_id': {'type': ['integer', 'string']},
+ 'hypervisor': {'type': 'string'},
+ 'os': {'type': 'string'},
+ 'architecture': {'type': 'string'},
+ 'version': {'type': 'string'},
+ 'url': {'type': 'string', 'format': 'uri'},
+ 'md5hash': {'type': 'string'}
+ },
+ 'required': ['agent_id', 'hypervisor', 'os', 'architecture',
+ 'version', 'url', 'md5hash']
+ }
+ },
+ 'required': ['agent']
+}
diff --git a/tempest/api_schema/compute/keypairs.py b/tempest/api_schema/compute/keypairs.py
index b8f905f..2ae410c 100644
--- a/tempest/api_schema/compute/keypairs.py
+++ b/tempest/api_schema/compute/keypairs.py
@@ -49,10 +49,7 @@
'fingerprint': {'type': 'string'},
'name': {'type': 'string'},
'public_key': {'type': 'string'},
- # NOTE: Now the type of 'user_id' is integer, but here
- # allows 'string' also because we will be able to change
- # it to 'uuid' in the future.
- 'user_id': {'type': ['integer', 'string']},
+ 'user_id': {'type': 'string'},
'private_key': {'type': 'string'}
},
# When create keypair API is being called with 'Public key'
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
index 6723869..6549272 100644
--- a/tempest/api_schema/compute/migrations.py
+++ b/tempest/api_schema/compute/migrations.py
@@ -22,10 +22,7 @@
'items': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'integer'},
'status': {'type': 'string'},
'instance_uuid': {'type': 'string'},
'source_node': {'type': 'string'},
@@ -33,12 +30,8 @@
'dest_node': {'type': 'string'},
'dest_compute': {'type': 'string'},
'dest_host': {'type': 'string'},
- 'old_instance_type_id': {
- 'type': ['integer', 'string']
- },
- 'new_instance_type_id': {
- 'type': ['integer', 'string']
- },
+ 'old_instance_type_id': {'type': 'integer'},
+ 'new_instance_type_id': {'type': 'integer'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']}
},
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index 14e9ce9..ad0aa29 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -48,7 +48,7 @@
}
}
-base_update_server = {
+base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
@@ -56,13 +56,13 @@
'server': {
'type': 'object',
'properties': {
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
'image': {
'type': 'object',
'properties': {
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
@@ -70,7 +70,7 @@
'flavor': {
'type': 'object',
'properties': {
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
diff --git a/tempest/api_schema/compute/services.py b/tempest/api_schema/compute/services.py
index 4c58013..eaba129 100644
--- a/tempest/api_schema/compute/services.py
+++ b/tempest/api_schema/compute/services.py
@@ -22,10 +22,7 @@
'items': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'integer'},
'zone': {'type': 'string'},
'host': {'type': 'string'},
'state': {'type': 'string'},
diff --git a/tempest/api_schema/compute/v2/agents.py b/tempest/api_schema/compute/v2/agents.py
index 837731f..30f999f 100644
--- a/tempest/api_schema/compute/v2/agents.py
+++ b/tempest/api_schema/compute/v2/agents.py
@@ -12,6 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.api_schema.compute import agents
+
+create_agent = {
+ 'status_code': [200],
+ 'response_body': agents.common_create_agent
+}
+
delete_agent = {
'status_code': [200]
}
diff --git a/tempest/api_schema/compute/v2/floating_ips.py b/tempest/api_schema/compute/v2/floating_ips.py
index 03e6aef..fb3667b 100644
--- a/tempest/api_schema/compute/v2/floating_ips.py
+++ b/tempest/api_schema/compute/v2/floating_ips.py
@@ -27,7 +27,7 @@
# able to change it to 'uuid' in the future.
'id': {'type': ['integer', 'string']},
'pool': {'type': ['string', 'null']},
- 'instance_id': {'type': ['integer', 'string', 'null']},
+ 'instance_id': {'type': ['string', 'null']},
'ip': {
'type': 'string',
'format': 'ip-address'
@@ -58,7 +58,7 @@
# 'uuid' in the future.
'id': {'type': ['integer', 'string']},
'pool': {'type': ['string', 'null']},
- 'instance_id': {'type': ['integer', 'string', 'null']},
+ 'instance_id': {'type': ['string', 'null']},
'ip': {
'type': 'string',
'format': 'ip-address'
@@ -117,3 +117,14 @@
'required': ['floating_ips_bulk_create']
}
}
+
+delete_floating_ips_bulk = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'floating_ips_bulk_delete': {'type': 'string'}
+ },
+ 'required': ['floating_ips_bulk_delete']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index d121060..90737a2 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -30,10 +30,7 @@
'server': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'links': parameter_types.links
},
'required': ['id', 'links']
diff --git a/tempest/api_schema/compute/v2/keypairs.py b/tempest/api_schema/compute/v2/keypairs.py
index 9a025c3..32d8cca 100644
--- a/tempest/api_schema/compute/v2/keypairs.py
+++ b/tempest/api_schema/compute/v2/keypairs.py
@@ -25,10 +25,7 @@
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'},
- # NOTE: Now the type of 'user_id' is integer, but here
- # allows 'string' also because we will be able to change
- # it to 'uuid' in the future.
- 'user_id': {'type': ['integer', 'string']},
+ 'user_id': {'type': 'string'},
'deleted': {'type': 'boolean'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index fe53abd..dc4054c 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -25,10 +25,7 @@
'server': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is uuid, but here allows
- # 'integer' also because old OpenStack uses 'integer'
- # as a server id.
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'security_groups': {'type': 'array'},
'links': parameter_types.links,
'adminPass': {'type': 'string'},
@@ -46,7 +43,7 @@
}
}
-update_server = copy.deepcopy(servers.base_update_server)
+update_server = copy.deepcopy(servers.base_update_get_server)
update_server['response_body']['properties']['server']['properties'].update({
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
@@ -60,6 +57,39 @@
'hostId'
)
+get_server = copy.deepcopy(servers.base_update_get_server)
+get_server['response_body']['properties']['server']['properties'].update({
+ 'key_name': {'type': ['string', 'null']},
+ 'hostId': {'type': 'string'},
+
+ # NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
+ # attributes.
+ 'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
+ 'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
+ 'OS-EXT-AZ:availability_zone': {'type': 'string'},
+
+ # NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
+ # attributes.
+ 'OS-EXT-STS:task_state': {'type': ['string', 'null']},
+ 'OS-EXT-STS:vm_state': {'type': 'string'},
+ 'OS-EXT-STS:power_state': {'type': 'integer'},
+ 'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
+ 'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
+ 'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
+ 'os-extended-volumes:volumes_attached': {'type': 'array'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6,
+ 'config_drive': {'type': 'string'}
+})
+get_server['response_body']['properties']['server']['required'].append(
+ # NOTE: OS-SRV-USG, OS-EXT-AZ, OS-EXT-STS, OS-EXT-SRV-ATTR,
+ # os-extended-volumes, OS-DCF and accessIPv4/v6 are API
+ # extensions, and some environments return a response without
+ # these attributes. So they are not 'required'.
+ 'hostId'
+)
+
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
@@ -178,6 +208,20 @@
'status_code': [204]
}
+list_server_groups = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server_groups': {
+ 'type': 'array',
+ 'items': common_server_group
+ }
+ },
+ 'required': ['server_groups']
+ }
+}
+
instance_actions_object = copy.deepcopy(servers.common_instance_actions)
instance_actions_object[
'properties'].update({'instance_uuid': {'type': 'string'}})
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 84a659c..1af951f 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -38,7 +38,7 @@
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
- 'serverId': {'type': ['integer', 'string']}
+ 'serverId': {'type': 'string'}
}
# NOTE- If volume is not attached to any server
# then, 'attachments' attributes comes as array
@@ -86,7 +86,7 @@
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
- 'serverId': {'type': ['integer', 'string']}
+ 'serverId': {'type': 'string'}
}
# NOTE- If volume is not attached to any server
# then, 'attachments' attributes comes as array
diff --git a/tempest/api_schema/compute/v3/agents.py b/tempest/api_schema/compute/v3/agents.py
index 63d1c46..597a089 100644
--- a/tempest/api_schema/compute/v3/agents.py
+++ b/tempest/api_schema/compute/v3/agents.py
@@ -12,6 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.api_schema.compute import agents
+
+create_agent = {
+ 'status_code': [201],
+ 'response_body': agents.common_create_agent
+}
+
delete_agent = {
'status_code': [204]
}
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 4fb2d87..3b50516 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -25,10 +25,7 @@
'server': {
'type': 'object',
'properties': {
- # NOTE: Now the type of 'id' is uuid, but here allows
- # 'integer' also because old OpenStack uses 'integer'
- # as a server id.
- 'id': {'type': ['integer', 'string']},
+ 'id': {'type': 'string'},
'os-security-groups:security_groups': {'type': 'array'},
'links': parameter_types.links,
'admin_password': {'type': 'string'},
@@ -57,7 +54,7 @@
['type', 'mac_addr']
)
-update_server = copy.deepcopy(servers.base_update_server)
+update_server = copy.deepcopy(servers.base_update_get_server)
update_server['response_body']['properties']['server']['properties'].update({
'addresses': addresses_v3,
'host_id': {'type': 'string'},
@@ -71,6 +68,43 @@
'host_id'
)
+get_server = copy.deepcopy(servers.base_update_get_server)
+get_server['response_body']['properties']['server']['properties'].update({
+ 'key_name': {'type': ['string', 'null']},
+ 'host_id': {'type': 'string'},
+
+ # NOTE: Non-admin users also can see "os-server-usage" and
+ # "os-extended-availability-zone" attributes.
+ 'os-server-usage:launched_at': {'type': ['string', 'null']},
+ 'os-server-usage:terminated_at': {'type': ['string', 'null']},
+ 'os-extended-availability-zone:availability_zone': {'type': 'string'},
+
+ # NOTE: Admin users only can see "os-extended-status" and
+ # "os-extended-server-attributes" attributes.
+ 'os-extended-status:task_state': {'type': ['string', 'null']},
+ 'os-extended-status:vm_state': {'type': 'string'},
+ 'os-extended-status:power_state': {'type': 'integer'},
+ 'os-extended-status:locked_by': {'type': ['string', 'null']},
+ 'os-extended-server-attributes:host': {'type': ['string', 'null']},
+ 'os-extended-server-attributes:instance_name': {'type': 'string'},
+ 'os-extended-server-attributes:hypervisor_hostname': {
+ 'type': ['string', 'null']
+ },
+ 'os-extended-volumes:volumes_attached': {'type': 'array'},
+ 'os-pci:pci_devices': {'type': 'array'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6,
+ 'os-config-drive:config_drive': {'type': 'string'}
+})
+get_server['response_body']['properties']['server']['required'].append(
+ # NOTE: os-server-usage, os-extended-availability-zone,
+ # os-extended-status, os-extended-server-attributes,
+ # os-extended-volumes, os-pci, os-access-ips and
+ # os-config-drive are API extensions, and some environments
+ # return a response without these attributes. So they are not 'required'.
+ 'host_id'
+)
+
attach_detach_volume = {
'status_code': [202]
}
diff --git a/tempest/api_schema/queuing/v1/queues.py b/tempest/api_schema/queuing/v1/queues.py
index 4630e1c..f0b2691 100644
--- a/tempest/api_schema/queuing/v1/queues.py
+++ b/tempest/api_schema/queuing/v1/queues.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+
list_link = {
'type': 'object',
'properties': {
@@ -58,6 +59,11 @@
}
}
+age = {
+ 'type': 'number',
+ 'minimum': 0
+}
+
message_link = {
'type': 'object',
'properties': {
@@ -65,7 +71,7 @@
'type': 'string',
'format': 'uri'
},
- 'age': {'type': 'number'},
+ 'age': age,
'created': {
'type': 'string',
'format': 'date-time'
@@ -96,3 +102,136 @@
'required': ['messages']
}
}
+
+resource_schema = {
+ 'type': 'array',
+ 'items': 'string',
+ 'minItems': 1
+}
+
+post_messages = {
+ 'status_code': [201],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'resources': resource_schema,
+ 'partial': {'type': 'boolean'}
+ }
+ },
+ 'required': ['resources', 'partial']
+}
+
+message_ttl = {
+ 'type': 'number',
+ 'minimum': 1
+}
+
+list_messages_links = {
+ 'type': 'array',
+ 'maxItems': 1,
+ 'minItems': 1,
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'rel': {'type': 'string'},
+ 'href': {'type': 'string'}
+ },
+ 'required': ['rel', 'href']
+ }
+}
+
+list_messages_response = {
+ 'type': 'array',
+ 'minItems': 1,
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string'},
+ 'ttl': message_ttl,
+ 'age': age,
+ 'body': {'type': 'object'}
+ },
+ 'required': ['href', 'ttl', 'age', 'body']
+ }
+}
+
+list_messages = {
+ 'status_code': [200, 204],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'links': list_messages_links,
+ 'messages': list_messages_response
+ }
+ },
+ 'required': ['links', 'messages']
+}
+
+single_message = {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string'},
+ 'ttl': message_ttl,
+ 'age': age,
+ 'body': {'type': 'object'}
+ },
+ 'required': ['href', 'ttl', 'age', 'body']
+}
+
+get_single_message = {
+ 'status_code': [200],
+ 'response_body': single_message
+}
+
+get_multiple_messages = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'array',
+ 'items': single_message,
+ 'minItems': 1
+ }
+}
+
+messages_claimed = {
+ 'type': 'object',
+ 'properties': {
+ 'href': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'ttl': message_ttl,
+ 'age': {'type': 'number'},
+ 'body': {'type': 'object'}
+ },
+ 'required': ['href', 'ttl', 'age', 'body']
+}
+
+claim_messages = {
+ 'status_code': [201, 204],
+ 'response_body': {
+ 'type': 'array',
+ 'items': messages_claimed,
+ 'minItems': 1
+ }
+}
+
+claim_ttl = {
+ 'type': 'number',
+ 'minimum': 1
+}
+
+query_claim = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'age': {'type': 'number'},
+ 'ttl': claim_ttl,
+ 'messages': {
+ 'type': 'array',
+ 'minItems': 1
+ }
+ },
+ 'required': ['ttl', 'age', 'messages']
+ }
+}
diff --git a/tempest/auth.py b/tempest/auth.py
index 9c51edb..830dca9 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -213,7 +213,7 @@
# build authenticated request
# returns new request, it does not touch the original values
_headers = copy.deepcopy(headers) if headers is not None else {}
- _headers['X-Auth-Token'] = token
+ _headers['X-Auth-Token'] = str(token)
if url is None or url == "":
_url = base_url
else:
@@ -223,7 +223,7 @@
parts[2] = re.sub("/{2,}", "/", parts[2])
_url = urlparse.urlunparse(parts)
# no change to method or body
- return _url, _headers, body
+ return str(_url), _headers, body
def _auth_client(self):
raise NotImplementedError
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 6aa98c4..0571f4f 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -83,6 +83,12 @@
return self.cmd_with_auth(
'cinder', action, flags, params, admin, fail_ok)
+ def swift(self, action, flags='', params='', admin=True, fail_ok=False):
+ """Executes swift command for the given action."""
+ flags += ' --os-endpoint-type %s' % CONF.object_storage.endpoint_type
+ return self.cmd_with_auth(
+ 'swift', action, flags, params, admin, fail_ok)
+
def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
"""Executes neutron command for the given action."""
flags += ' --endpoint-type %s' % CONF.network.endpoint_type
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index d9d92a0..946b89e 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -108,7 +108,6 @@
self.assertTableStruct(type_list, ['ID', 'Name'])
def test_cinder_list_extensions(self):
- self.cinder('list-extensions')
roles = self.parser.listing(self.cinder('list-extensions'))
self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated'])
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index c1d58b5..49d079e 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -49,7 +49,8 @@
@test.attr(type='smoke')
def test_neutron_net_list(self):
- self.neutron('net-list')
+ net_list = self.parser.listing(self.neutron('net-list'))
+ self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
@test.attr(type='smoke')
def test_neutron_ext_list(self):
@@ -111,11 +112,14 @@
@test.attr(type='smoke')
@test.requires_ext(extension='external-net', service='network')
def test_neutron_net_external_list(self):
- self.neutron('net-external-list')
+ net_ext_list = self.parser.listing(self.neutron('net-external-list'))
+ self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
@test.attr(type='smoke')
def test_neutron_port_list(self):
- self.neutron('port-list')
+ port_list = self.parser.listing(self.neutron('port-list'))
+ self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
+ 'fixed_ips'])
@test.attr(type='smoke')
@test.requires_ext(extension='quotas', service='network')
@@ -125,7 +129,9 @@
@test.attr(type='smoke')
@test.requires_ext(extension='router', service='network')
def test_neutron_router_list(self):
- self.neutron('router-list')
+ router_list = self.parser.listing(self.neutron('router-list'))
+ self.assertTableStruct(router_list, ['id', 'name',
+ 'external_gateway_info'])
@test.attr(type='smoke')
@test.requires_ext(extension='security-group', service='network')
@@ -136,11 +142,18 @@
@test.attr(type='smoke')
@test.requires_ext(extension='security-group', service='network')
def test_neutron_security_group_rule_list(self):
- self.neutron('security-group-rule-list')
+ security_grp = self.parser.listing(self.neutron
+ ('security-group-rule-list'))
+ self.assertTableStruct(security_grp, ['id', 'security_group',
+ 'direction', 'protocol',
+ 'remote_ip_prefix',
+ 'remote_group'])
@test.attr(type='smoke')
def test_neutron_subnet_list(self):
- self.neutron('subnet-list')
+ subnet_list = self.parser.listing(self.neutron('subnet-list'))
+ self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
+ 'allocation_pools'])
@test.attr(type='smoke')
def test_neutron_help(self):
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
new file mode 100644
index 0000000..6d6caa7
--- /dev/null
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -0,0 +1,95 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import subprocess
+
+import tempest.cli
+from tempest import config
+
+CONF = config.CONF
+
+
+class SimpleReadOnlySwiftClientTest(tempest.cli.ClientTestBase):
+ """Basic, read-only tests for Swift CLI client.
+
+ Checks return values and output of read-only commands.
+ These tests do not presume any content, nor do they create
+ their own. They only verify the structure of output if present.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ if not CONF.service_available.swift:
+ msg = ("%s skipped as Swift is not available" % cls.__name__)
+ raise cls.skipException(msg)
+ super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
+
+ def test_swift_fake_action(self):
+ self.assertRaises(subprocess.CalledProcessError,
+ self.swift,
+ 'this-does-not-exist')
+
+ def test_swift_list(self):
+ self.swift('list')
+
+ def test_swift_stat(self):
+ output = self.swift('stat')
+ entries = ['Account', 'Containers', 'Objects', 'Bytes', 'Content-Type',
+ 'X-Timestamp', 'X-Trans-Id']
+ for entry in entries:
+ self.assertTrue(entry in output)
+
+ def test_swift_capabilities(self):
+ output = self.swift('capabilities')
+ entries = ['account_listing_limit', 'container_listing_limit',
+ 'max_file_size', 'Additional middleware']
+ for entry in entries:
+ self.assertTrue(entry in output)
+
+ def test_swift_help(self):
+ help_text = self.swift('', flags='--help')
+ lines = help_text.split('\n')
+ self.assertFirstLineStartsWith(lines, 'Usage: swift')
+
+ commands = []
+ cmds_start = lines.index('Positional arguments:')
+ cmds_end = lines.index('Examples:')
+ command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)')
+ for line in lines[cmds_start:cmds_end]:
+ match = command_pattern.match(line)
+ if match:
+ commands.append(match.group(1))
+ commands = set(commands)
+ wanted_commands = set(('stat', 'list', 'delete',
+ 'download', 'post', 'upload'))
+ self.assertFalse(wanted_commands - commands)
+
+ # Optional arguments:
+
+ def test_swift_version(self):
+ self.swift('', flags='--version')
+
+ def test_swift_debug_list(self):
+ self.swift('list', flags='--debug')
+
+ def test_swift_retries_list(self):
+ self.swift('list', flags='--retries 3')
+
+ def test_swift_region_list(self):
+ region = CONF.object_storage.region
+ if not region:
+ region = CONF.identity.region
+ self.swift('list', flags='--os-region-name ' + region)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 20ee63e..1d46028 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -35,6 +35,7 @@
from tempest.services.image.v2.json import image_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
+from tempest.services.volume.json import volumes_client
OPTS = {}
USERS = {}
@@ -60,6 +61,7 @@
self.containers = container_client.ContainerClient(_auth)
self.images = image_client.ImageClientV2JSON(_auth)
self.flavors = flavors_client.FlavorsClientJSON(_auth)
+ self.volumes = volumes_client.VolumesClientJSON(_auth)
def load_resources(fname):
@@ -190,6 +192,7 @@
self.check_users()
self.check_objects()
self.check_servers()
+ self.check_volumes()
def check_users(self):
"""Check that the users we expect to exist, do.
@@ -235,6 +238,21 @@
"Server %s is not pingable at %s" % (
server['name'], addr))
+ def check_volumes(self):
+ """Check that the volumes are still there and attached."""
+ for volume in self.res['volumes']:
+ client = client_for_user(volume['owner'])
+ found = _get_volume_by_name(client, volume['name'])
+ self.assertIsNotNone(
+ found,
+ "Couldn't find expected volume %s" % volume['name'])
+
+ # Verify that the volume's attachment can be retrieved
+ server_id = _get_server_by_name(client, volume['server'])['id']
+ attachment = self.client.get_attachment_from_volume(volume)
+ self.assertEqual(volume['id'], attachment['volume_id'])
+ self.assertEqual(server_id, attachment['server_id'])
+
#######################
#
@@ -339,6 +357,40 @@
#######################
#
+# VOLUMES
+#
+#######################
+
+def _get_volume_by_name(client, name):
+ r, body = client.volumes.list_volumes()
+ for volume in body['volumes']:
+ if name == volume['name']:
+ return volume
+ return None
+
+
+def create_volumes(volumes):
+ for volume in volumes:
+ client = client_for_user(volume['owner'])
+
+ # only create a volume if the name isn't here
+ r, body = client.volumes.list_volumes()
+ if any(item['name'] == volume['name'] for item in body):
+ continue
+
+ client.volumes.create_volume(volume['name'], volume['size'])
+
+
+def attach_volumes(volumes):
+ for volume in volumes:
+ client = client_for_user(volume['owner'])
+
+ server_id = _get_server_by_name(client, volume['server'])['id']
+ client.volumes.attach_volume(volume['name'], server_id)
+
+
+#######################
+#
# MAIN LOGIC
#
#######################
@@ -355,6 +407,8 @@
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
+ create_volumes(RES['volumes'])
+ attach_volumes(RES['volumes'])
def get_options():
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index f7cb8a9..a1f567b 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -32,9 +32,14 @@
aki: cirros-0.3.2-x86_64-vmlinuz
ari: cirros-0.3.2-x86_64-initrd
volumes:
- - assegai:
- - owner: javelin
- - gb: 1
+ - name: assegai
+ server: peltast
+ owner: javelin
+ size: 1
+ - name: pifpouf
+ server: hoplite
+ owner: javelin
+ size: 2
servers:
- name: peltast
owner: javelin
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 6720847..2ab008d 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -50,7 +50,7 @@
def iptables_raw(table):
- return sudo_cmd_call("iptables -v -S -t " + table)
+ return sudo_cmd_call("iptables --line-numbers -L -nv -t " + table)
def ip_ns_list():
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index b4618ed..8166de5 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -106,12 +106,23 @@
roles = self.identity_admin_client.roles.list()
return roles
- def _assign_user_role(self, tenant, user, role):
+ def _assign_user_role(self, tenant, user, role_name):
+ role = None
+ try:
+ roles = self._list_roles()
+ if self.tempest_client:
+ role = next(r for r in roles if r['name'] == role_name)
+ else:
+ role = next(r for r in roles if r.name == role_name)
+ except StopIteration:
+ msg = 'No "%s" role found' % role_name
+ raise exceptions.NotFound(msg)
if self.tempest_client:
- self.identity_admin_client.assign_user_role(tenant, user, role)
+ self.identity_admin_client.assign_user_role(tenant['id'],
+ user['id'], role['id'])
else:
- self.identity_admin_client.roles.add_user_role(user,
- role, tenant=tenant)
+ self.identity_admin_client.roles.add_user_role(user.id, role.id,
+ tenant.id)
def _delete_user(self, user):
if self.tempest_client:
@@ -149,22 +160,11 @@
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self._create_user(username, self.password,
tenant, email)
+ # NOTE(andrey-mp): user needs this role to create containers in swift
+ swift_operator_role = CONF.object_storage.operator_role
+ self._assign_user_role(tenant, user, swift_operator_role)
if admin:
- role = None
- try:
- roles = self._list_roles()
- admin_role = CONF.identity.admin_role
- if self.tempest_client:
- role = next(r for r in roles if r['name'] == admin_role)
- else:
- role = next(r for r in roles if r.name == admin_role)
- except StopIteration:
- msg = "No admin role found"
- raise exceptions.NotFound(msg)
- if self.tempest_client:
- self._assign_user_role(tenant['id'], user['id'], role['id'])
- else:
- self._assign_user_role(tenant.id, user.id, role.id)
+ self._assign_user_role(tenant, user, CONF.identity.admin_role)
return self._get_credentials(user, tenant)
def _get_credentials(self, user, tenant):
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 33128a9..9e0f4d3 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -18,6 +18,7 @@
import json
from lxml import etree
import re
+import string
import time
import jsonschema
@@ -295,9 +296,11 @@
req_url,
secs,
str(req_headers),
- str(req_body)[:2048],
+ filter(lambda x: x in string.printable,
+ str(req_body)[:2048]),
str(resp),
- str(resp_body)[:2048]),
+ filter(lambda x: x in string.printable,
+ str(resp_body)[:2048])),
extra=extra)
def _parse_resp(self, body):
@@ -369,7 +372,7 @@
# The warning is normal for SHOULD/SHOULD NOT case
# Likely it will cause an error
- if not resp_body and resp.status >= 400:
+ if method != 'HEAD' and not resp_body and resp.status >= 400:
self.LOG.warning("status >= 400 response with empty body")
def _request(self, method, url, headers=None, body=None):
diff --git a/tempest/config.py b/tempest/config.py
index e3f0f2a..0796d98 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -310,7 +310,11 @@
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
- 'be same as [nova.rdp]->enabled in nova.conf')
+ 'be same as [nova.rdp]->enabled in nova.conf'),
+ cfg.BoolOpt('rescue',
+ default=True,
+ help='Does the test environment support instance rescue '
+ 'mode?')
]
@@ -438,6 +442,12 @@
default=['all'],
help='A list of enabled network extensions with a special '
'entry all which indicates every extension is enabled'),
+ cfg.BoolOpt('ipv6_subnet_attributes',
+ default=False,
+ help="Allow the execution of IPv6 subnet tests that use "
+ "the extended IPv6 attributes ipv6_ra_mode "
+ "and ipv6_address_mode"
+ )
]
queuing_group = cfg.OptGroup(name='queuing',
@@ -451,6 +461,28 @@
default=20,
help='The maximum number of queue records per page when '
'listing queues'),
+ cfg.IntOpt('max_queue_metadata',
+ default=65536,
+ help='The maximum metadata size for a queue'),
+ cfg.IntOpt('max_messages_per_page',
+ default=20,
+ help='The maximum number of queue message per page when '
+ 'listing (or) posting messages'),
+ cfg.IntOpt('max_message_size',
+ default=262144,
+ help='The maximum size of a message body'),
+ cfg.IntOpt('max_messages_per_claim',
+ default=20,
+ help='The maximum number of messages per claim'),
+ cfg.IntOpt('max_message_ttl',
+ default=1209600,
+ help='The maximum ttl for a message'),
+ cfg.IntOpt('max_claim_ttl',
+ default=43200,
+ help='The maximum ttl for a claim'),
+ cfg.IntOpt('max_claim_grace',
+ default=43200,
+ help='The maximum grace period for a claim'),
]
volume_group = cfg.OptGroup(name='volume',
@@ -910,6 +942,9 @@
cfg.BoolOpt('driver_enabled',
default=False,
help="Whether the Ironic nova-compute driver is enabled"),
+ cfg.StrOpt('driver',
+ default='fake',
+ help="Driver name which Ironic uses"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
@@ -920,14 +955,14 @@
default=300,
help="Timeout for Ironic node to completely provision"),
cfg.IntOpt('association_timeout',
- default=10,
+ default=30,
help="Timeout for association of Nova instance and Ironic "
"node"),
cfg.IntOpt('power_timeout',
- default=20,
+ default=60,
help="Timeout for Ironic power transitions."),
cfg.IntOpt('unprovision_timeout',
- default=20,
+ default=60,
help="Timeout for unprovisioning an Ironic node.")
]
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 183d422..93329bc 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -50,7 +50,7 @@
T104: Scenario tests require a services decorator
"""
- if 'tempest/scenario/test_' in filename:
+ if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
@@ -105,9 +105,24 @@
"T107: service tag should not be in path")
+def no_official_client_manager_in_api_tests(physical_line, filename):
+ """Check that the OfficialClientManager isn't used in the api tests
+
+ The api tests should not use the official clients.
+
+ T108: Can not use OfficialClientManager in the API tests
+ """
+ if 'tempest/api' in filename:
+ if 'OfficialClientManager' in physical_line:
+ return (physical_line.find('OfficialClientManager'),
+ 'T108: OfficialClientManager can not be used in the api '
+ 'tests')
+
+
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
register(no_setupclass_for_unit_tests)
register(no_vi_headers)
register(service_tags_not_in_module_path)
+ register(no_official_client_manager_in_api_tests)
diff --git a/tempest/openstack/common/config/generator.py b/tempest/openstack/common/config/generator.py
index 8156cc5..664200e 100644
--- a/tempest/openstack/common/config/generator.py
+++ b/tempest/openstack/common/config/generator.py
@@ -150,7 +150,7 @@
def _is_in_group(opt, group):
- "Check if opt is in group."
+ """Check if opt is in group."""
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
@@ -159,7 +159,7 @@
return False
-def _guess_groups(opt, mod_obj):
+def _guess_groups(opt):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
@@ -193,7 +193,7 @@
ret = {}
for opt in opts:
- ret.setdefault(_guess_groups(opt, obj), []).append(opt)
+ ret.setdefault(_guess_groups(opt), []).append(opt)
return ret.items()
@@ -223,6 +223,8 @@
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
+ hostname = socket.gethostname()
+ fqdn = socket.getfqdn()
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
@@ -234,8 +236,13 @@
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
- elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
- return 'tempest'
+ elif value in (hostname, fqdn):
+ if 'host' in name:
+ return 'tempest'
+ elif value.endswith(hostname):
+ return value.replace(hostname, 'tempest')
+ elif value.endswith(fqdn):
+ return value.replace(fqdn, 'tempest')
elif value.strip() != value:
return '"%s"' % value
return value
@@ -246,7 +253,6 @@
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
- opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
index 17f66f7..6102e67 100644
--- a/tempest/openstack/common/gettextutils.py
+++ b/tempest/openstack/common/gettextutils.py
@@ -32,24 +32,113 @@
from babel import localedata
import six
-_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
-_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
-
-# We use separate translation catalogs for each log level, so set up a
-# mapping between the log level name and the translator. The domain
-# for the log level is project_name + "-log-" + log_level so messages
-# for each level end up in their own catalog.
-_t_log_levels = dict(
- (level, gettext.translation('tempest' + '-log-' + level,
- localedir=_localedir,
- fallback=True))
- for level in ['info', 'warning', 'error', 'critical']
-)
-
_AVAILABLE_LANGUAGES = {}
+
+# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
+class TranslatorFactory(object):
+ """Create translator functions
+ """
+
+ def __init__(self, domain, lazy=False, localedir=None):
+ """Establish a set of translation functions for the domain.
+
+ :param domain: Name of translation domain,
+ specifying a message catalog.
+ :type domain: str
+ :param lazy: Delays translation until a message is emitted.
+ Defaults to False.
+ :type lazy: Boolean
+ :param localedir: Directory with translation catalogs.
+ :type localedir: str
+ """
+ self.domain = domain
+ self.lazy = lazy
+ if localedir is None:
+ localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
+ self.localedir = localedir
+
+ def _make_translation_func(self, domain=None):
+ """Return a new translation function ready for use.
+
+ Takes into account whether or not lazy translation is being
+ done.
+
+ The domain can be specified to override the default from the
+ factory, but the localedir from the factory is always used
+ because we assume the log-level translation catalogs are
+ installed in the same directory as the main application
+ catalog.
+
+ """
+ if domain is None:
+ domain = self.domain
+ if self.lazy:
+ return functools.partial(Message, domain=domain)
+ t = gettext.translation(
+ domain,
+ localedir=self.localedir,
+ fallback=True,
+ )
+ if six.PY3:
+ return t.gettext
+ return t.ugettext
+
+ @property
+ def primary(self):
+ "The default translation function."
+ return self._make_translation_func()
+
+ def _make_log_translation_func(self, level):
+ return self._make_translation_func(self.domain + '-log-' + level)
+
+ @property
+ def log_info(self):
+ "Translate info-level log messages."
+ return self._make_log_translation_func('info')
+
+ @property
+ def log_warning(self):
+ "Translate warning-level log messages."
+ return self._make_log_translation_func('warning')
+
+ @property
+ def log_error(self):
+ "Translate error-level log messages."
+ return self._make_log_translation_func('error')
+
+ @property
+ def log_critical(self):
+ "Translate critical-level log messages."
+ return self._make_log_translation_func('critical')
+
+
+# NOTE(dhellmann): When this module moves out of the incubator into
+# oslo.i18n, these global variables can be moved to an integration
+# module within each application.
+
+# Create the global translation functions.
+_translators = TranslatorFactory('tempest')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+# NOTE(dhellmann): End of globals that will move to the application's
+# integration module.
+
+
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
@@ -58,41 +147,18 @@
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
- global USE_LAZY
+ # FIXME(dhellmann): This function will be removed in oslo.i18n,
+ # because the TranslatorFactory makes it superfluous.
+ global _, _LI, _LW, _LE, _LC, USE_LAZY
+ tf = TranslatorFactory('tempest', lazy=True)
+ _ = tf.primary
+ _LI = tf.log_info
+ _LW = tf.log_warning
+ _LE = tf.log_error
+ _LC = tf.log_critical
USE_LAZY = True
-def _(msg):
- if USE_LAZY:
- return Message(msg, domain='tempest')
- else:
- if six.PY3:
- return _t.gettext(msg)
- return _t.ugettext(msg)
-
-
-def _log_translation(msg, level):
- """Build a single translation of a log message
- """
- if USE_LAZY:
- return Message(msg, domain='tempest' + '-log-' + level)
- else:
- translator = _t_log_levels[level]
- if six.PY3:
- return translator.gettext(msg)
- return translator.ugettext(msg)
-
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = functools.partial(_log_translation, level='info')
-_LW = functools.partial(_log_translation, level='warning')
-_LE = functools.partial(_log_translation, level='error')
-_LC = functools.partial(_log_translation, level='critical')
-
-
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
@@ -112,26 +178,9 @@
any available locale.
"""
if lazy:
- # NOTE(mrodden): Lazy gettext functionality.
- #
- # The following introduces a deferred way to do translations on
- # messages in OpenStack. We override the standard _() function
- # and % (format string) operation to build Message objects that can
- # later be translated when we have more information.
- def _lazy_gettext(msg):
- """Create and return a Message object.
-
- Lazy gettext function for a given domain, it is a factory method
- for a project/module to get a lazy gettext function for its own
- translation domain (i.e. nova, glance, cinder, etc.)
-
- Message encapsulates a string so that we can translate
- it later when needed.
- """
- return Message(msg, domain=domain)
-
from six import moves
- moves.builtins.__dict__['_'] = _lazy_gettext
+ tf = TranslatorFactory(domain, lazy=True)
+ moves.builtins.__dict__['_'] = tf.primary
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
@@ -274,13 +323,14 @@
def __radd__(self, other):
return self.__add__(other)
- def __str__(self):
- # NOTE(luisg): Logging in python 2.6 tries to str() log records,
- # and it expects specifically a UnicodeError in order to proceed.
- msg = _('Message objects do not support str() because they may '
- 'contain non-ascii characters. '
- 'Please use unicode() or translate() instead.')
- raise UnicodeError(msg)
+ if six.PY2:
+ def __str__(self):
+ # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+ # and it expects specifically a UnicodeError in order to proceed.
+ msg = _('Message objects do not support str() because they may '
+ 'contain non-ascii characters. '
+ 'Please use unicode() or translate() instead.')
+ raise UnicodeError(msg)
def get_available_languages(domain):
diff --git a/tempest/openstack/common/importutils.py b/tempest/openstack/common/importutils.py
index 6c0d3b2..d5dd22f 100644
--- a/tempest/openstack/common/importutils.py
+++ b/tempest/openstack/common/importutils.py
@@ -24,10 +24,10 @@
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
+ __import__(mod_str)
try:
- __import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
- except (ValueError, AttributeError):
+ except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 07d8828..ca79325 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -21,6 +21,8 @@
import subprocess
import time
+from cinderclient import exceptions as cinder_exceptions
+import glanceclient
from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
@@ -83,8 +85,6 @@
cls.orchestration_client = cls.manager.orchestration_client
cls.data_processing_client = cls.manager.data_processing_client
cls.ceilometer_client = cls.manager.ceilometer_client
- cls.resource_keys = {}
- cls.os_resources = []
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -109,72 +109,85 @@
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'identity_admin')
- @staticmethod
- def cleanup_resource(resource, test_name):
+ def setUp(self):
+ super(OfficialClientTest, self).setUp()
+ self.cleanup_waits = []
+ # NOTE(mtreinish) This is safe to do in setUp instead of setUpClass
+ # because scenario tests in the same test class should not share
+ # resources. If resources were shared between test cases then it
+ # should be a single scenario test instead of multiples.
- LOG.debug("Deleting %r from shared resources of %s" %
- (resource, test_name))
+ # NOTE(yfried): this list is cleaned at the end of test_methods and
+ # not at the end of the class
+ self.addCleanup(self._wait_for_cleanups)
+
+ @staticmethod
+ def not_found_exception(exception):
+ """
+ @return: True if exception is of NotFound type
+ """
+ NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
+ return (exception.__class__.__name__ in NOT_FOUND_LIST
+ or
+ hasattr(exception, 'status_code') and
+ exception.status_code == 404)
+
+ def delete_wrapper(self, thing):
+ """Ignores NotFound exceptions for delete operations.
+
+ @param thing: object with delete() method.
+ OpenStack resources are assumed to have a delete() method which
+ destroys the resource
+ """
+
try:
- # OpenStack resources are assumed to have a delete()
- # method which destroys the resource...
- resource.delete()
+ thing.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
- # - Status code tolerated as a workaround for bug 1247568
- # - HTTPNotFound tolerated as this is currently raised when
- # attempting to delete an already-deleted heat stack.
- if (e.__class__.__name__ in ('NotFound', 'HTTPNotFound') or
- (hasattr(e, 'status_code') and e.status_code == 404)):
- return
- raise
-
- def is_deletion_complete():
- # Deletion testing is only required for objects whose
- # existence cannot be checked via retrieval.
- if isinstance(resource, dict):
- return True
- try:
- resource.get()
- except Exception as e:
- # Clients are expected to return an exception
- # called 'NotFound' if retrieval fails.
- if e.__class__.__name__ == 'NotFound':
- return True
+ if not self.not_found_exception(e):
raise
- return False
- # Block until resource deletion has completed or timed-out
- tempest.test.call_until_true(is_deletion_complete, 10, 1)
+ def _wait_for_cleanups(self):
+ """To handle async delete actions, a list of waits is added
+ which will be iterated over as the last step of clearing the
+ cleanup queue. That way all the delete calls are made up front
+ and the tests won't succeed unless the deletes are eventually
+ successful. This is the same basic approach used in the api tests to
+ limit cleanup execution time except here it is multi-resource,
+ because of the nature of the scenario tests.
+ """
+ for wait in self.cleanup_waits:
+ self.delete_timeout(**wait)
- @classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Because scenario tests are typically run in a
- # specific order, and because test methods in scenario tests
- # generally create resources in a particular order, we destroy
- # resources in the reverse order in which resources are added to
- # the scenario test class object
- while cls.os_resources:
- thing = cls.os_resources.pop()
- cls.cleanup_resource(thing, cls.__name__)
- cls.isolated_creds.clear_isolated_creds()
- super(OfficialClientTest, cls).tearDownClass()
+ def addCleanup_with_wait(self, things, thing_id,
+ error_status='ERROR',
+ exc_type=nova_exceptions.NotFound,
+ cleanup_callable=None, cleanup_args=[],
+ cleanup_kwargs={}):
+ """Adds wait for async resource deletion at the end of cleanups
- @classmethod
- def set_resource(cls, key, thing):
- LOG.debug("Adding %r to shared resources of %s" %
- (thing, cls.__name__))
- cls.resource_keys[key] = thing
- cls.os_resources.append(thing)
-
- @classmethod
- def get_resource(cls, key):
- return cls.resource_keys[key]
-
- @classmethod
- def remove_resource(cls, key):
- thing = cls.resource_keys[key]
- cls.os_resources.remove(thing)
- del cls.resource_keys[key]
+ @param things: type of the resource to delete
+ @param thing_id: the id of the resource to be cleaned-up
+ @param error_status: see manager.delete_timeout()
+ @param exc_type: see manager.delete_timeout()
+ @param cleanup_callable: method to load pass to self.addCleanup with
+ the following *cleanup_args, **cleanup_kwargs.
+ usually a delete method. if not used, will try to use:
+ things.delete(thing_id)
+ """
+ if cleanup_callable is None:
+ LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
+ " default".format(rclass=things, id=thing_id))
+ self.addCleanup(things.delete, thing_id)
+ else:
+ self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
+ wait_dict = {
+ 'things': things,
+ 'thing_id': thing_id,
+ 'error_status': error_status,
+ 'not_found_exception': exc_type,
+ }
+ self.cleanup_waits.append(wait_dict)
def status_timeout(self, things, thing_id, expected_status,
error_status='ERROR',
@@ -226,8 +239,11 @@
except not_found_exception:
if allow_notfound:
return True
- else:
- raise
+ raise
+ except Exception as e:
+ if allow_notfound and self.not_found_exception(e):
+ return True
+ raise
new_status = thing.status
@@ -287,6 +303,7 @@
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
+ self.addCleanup(self.delete_wrapper, sg_rule)
rules.append(sg_rule)
return rules
@@ -300,7 +317,7 @@
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
@@ -308,7 +325,17 @@
return secgroup
def create_server(self, client=None, name=None, image=None, flavor=None,
- wait=True, create_kwargs={}):
+ wait_on_boot=True, wait_on_delete=True,
+ create_kwargs={}):
+ """Creates VM instance.
+
+ @param client: compute client to create the instance
+ @param image: image from which to create the instance
+ @param wait_on_boot: wait for status ACTIVE before continue
+ @param wait_on_delete: force synchronous delete on cleanup
+ @param create_kwargs: additional details for instance creation
+ @return: client.server object
+ """
if client is None:
client = self.compute_client
if name is None:
@@ -342,19 +369,25 @@
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
- self.set_resource(name, server)
- if wait:
+ if wait_on_delete:
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup_with_wait(self.compute_client.servers, server.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[server])
+ if wait_on_boot:
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = client.servers.get(server.id)
- self.set_resource(name, server)
LOG.debug("Created server: %s", server)
return server
def create_volume(self, client=None, size=1, name=None,
- snapshot_id=None, imageRef=None):
+ snapshot_id=None, imageRef=None, volume_type=None,
+ wait_on_delete=True):
if client is None:
client = self.volume_client
if name is None:
@@ -362,8 +395,14 @@
LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
volume = client.volumes.create(size=size, display_name=name,
snapshot_id=snapshot_id,
- imageRef=imageRef)
- self.set_resource(name, volume)
+ imageRef=imageRef,
+ volume_type=volume_type)
+ if wait_on_delete:
+ self.addCleanup(self.delete_timeout,
+ self.volume_client.volumes,
+ volume.id)
+ self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
+ exc_type=cinder_exceptions.NotFound)
self.assertEqual(name, volume.display_name)
self.status_timeout(client.volumes, volume.id, 'available')
LOG.debug("Created volume: %s", volume)
@@ -379,7 +418,8 @@
name = data_utils.rand_name('scenario-snapshot-')
LOG.debug("Creating a snapshot image for server: %s", server.name)
image_id = compute_client.servers.create_image(server, name)
- self.addCleanup(image_client.images.delete, image_id)
+ self.addCleanup_with_wait(self.image_client.images, image_id,
+ exc_type=glanceclient.exc.HTTPNotFound)
self.status_timeout(image_client.images, image_id, 'active')
snapshot_image = image_client.images.get(image_id)
self.assertEqual(name, snapshot_image.name)
@@ -394,7 +434,7 @@
name = data_utils.rand_name('scenario-keypair-')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
- self.set_resource(name, keypair)
+ self.addCleanup(self.delete_wrapper, keypair)
return keypair
def get_remote_client(self, server_or_ip, username=None, private_key=None):
@@ -588,9 +628,12 @@
'key_name': self.keypair.id
}
self.instance = self.create_server(
- wait=False, create_kwargs=create_kwargs)
+ wait_on_boot=False, create_kwargs=create_kwargs)
- self.set_resource('instance', self.instance)
+ self.addCleanup_with_wait(self.compute_client.servers,
+ self.instance.id,
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.instance])
self.wait_node(self.instance.id)
self.node = self.get_node(instance_id=self.instance.id)
@@ -615,7 +658,6 @@
def terminate_instance(self):
self.instance.delete()
- self.remove_resource('instance')
self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node.uuid,
@@ -623,6 +665,75 @@
timeout=CONF.baremetal.unprovision_timeout)
+class EncryptionScenarioTest(OfficialClientTest):
+ """
+ Base class for encryption scenario tests
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(EncryptionScenarioTest, cls).setUpClass()
+
+ # use admin credentials to create encrypted volume types
+ admin_creds = cls.admin_credentials()
+ manager = clients.OfficialClientManager(credentials=admin_creds)
+ cls.admin_volume_client = manager.volume_client
+
+ def _wait_for_volume_status(self, status):
+ self.status_timeout(
+ self.volume_client.volumes, self.volume.id, status)
+
+ def nova_boot(self):
+ self.keypair = self.create_keypair()
+ create_kwargs = {'key_name': self.keypair.name}
+ self.server = self.create_server(self.compute_client,
+ image=self.image,
+ create_kwargs=create_kwargs)
+
+ def create_volume_type(self, client=None, name=None):
+ if not client:
+ client = self.admin_volume_client
+ if not name:
+ name = 'generic'
+ randomized_name = data_utils.rand_name('scenario-type-' + name + '-')
+ LOG.debug("Creating a volume type: %s", randomized_name)
+ volume_type = client.volume_types.create(randomized_name)
+ self.addCleanup(client.volume_types.delete, volume_type.id)
+ return volume_type
+
+ def create_encryption_type(self, client=None, type_id=None, provider=None,
+ key_size=None, cipher=None,
+ control_location=None):
+ if not client:
+ client = self.admin_volume_client
+ if not type_id:
+ volume_type = self.create_volume_type()
+ type_id = volume_type.id
+ LOG.debug("Creating an encryption type for volume type: %s", type_id)
+ client.volume_encryption_types.create(type_id,
+ {'provider': provider,
+ 'key_size': key_size,
+ 'cipher': cipher,
+ 'control_location':
+ control_location})
+
+ def nova_volume_attach(self):
+ attach_volume_client = self.compute_client.volumes.create_server_volume
+ volume = attach_volume_client(self.server.id,
+ self.volume.id,
+ '/dev/vdb')
+ self.assertEqual(self.volume.id, volume.id)
+ self._wait_for_volume_status('in-use')
+
+ def nova_volume_detach(self):
+ detach_volume_client = self.compute_client.volumes.delete_server_volume
+ detach_volume_client(self.server.id, self.volume.id)
+ self._wait_for_volume_status('available')
+
+ volume = self.volume_client.volumes.get(self.volume.id)
+ self.assertEqual('available', volume.status)
+
+
class NetworkScenarioTest(OfficialClientTest):
"""
Base class for network scenario tests
@@ -660,7 +771,7 @@
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
- self.set_resource(name, network)
+ self.addCleanup(self.delete_wrapper, network)
return network
def _list_networks(self, **kwargs):
@@ -736,7 +847,7 @@
subnet = net_common.DeletableSubnet(client=self.network_client,
**result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
- self.set_resource(data_utils.rand_name(namestart), subnet)
+ self.addCleanup(self.delete_wrapper, subnet)
return subnet
def _create_port(self, network, namestart='port-quotatest-'):
@@ -749,7 +860,7 @@
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
- self.set_resource(name, port)
+ self.addCleanup(self.delete_wrapper, port)
return port
def _get_server_port_id(self, server, ip_addr=None):
@@ -772,7 +883,7 @@
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
- self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
@@ -817,7 +928,7 @@
pool = net_common.DeletablePool(client=self.network_client,
**resp['pool'])
self.assertEqual(pool['name'], name)
- self.set_resource(name, pool)
+ self.addCleanup(self.delete_wrapper, pool)
return pool
def _create_member(self, address, protocol_port, pool_id):
@@ -832,7 +943,7 @@
resp = self.network_client.create_member(body)
member = net_common.DeletableMember(client=self.network_client,
**resp['member'])
- self.set_resource(data_utils.rand_name('member-'), member)
+ self.addCleanup(self.delete_wrapper, member)
return member
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
@@ -851,7 +962,7 @@
vip = net_common.DeletableVip(client=self.network_client,
**resp['vip'])
self.assertEqual(vip['name'], name)
- self.set_resource(name, vip)
+ self.addCleanup(self.delete_wrapper, vip)
return vip
def _check_vm_connectivity(self, ip_address,
@@ -993,7 +1104,7 @@
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
+ self.addCleanup(self.delete_wrapper, secgroup)
return secgroup
def _default_security_group(self, tenant_id, client=None):
@@ -1052,6 +1163,7 @@
client=client,
**sg_rule['security_group_rule']
)
+ self.addCleanup(self.delete_wrapper, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
@@ -1150,7 +1262,7 @@
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
- self.set_resource(name, router)
+ self.addCleanup(self.delete_wrapper, router)
return router
def _create_networks(self, tenant_id=None):
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 82ba3c5..aa7b6f8 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -59,7 +59,7 @@
# if a keypair was set, do not delete the stack on exit to allow
# for manual post-mortems
if not CONF.orchestration.keypair_name:
- self.set_resource('stack', self.stack)
+ self.addCleanup(self.client.stacks.delete, self.stack)
@test.skip_because(bug="1257575")
@test.attr(type='slow')
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 6817c48..0059619 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -42,13 +42,12 @@
availability_zone = kwargs['availability_zone']
self.assertEqual(aggregate.name, aggregate_name)
self.assertEqual(aggregate.availability_zone, availability_zone)
- self.set_resource(aggregate.id, aggregate)
+ self.addCleanup(self._delete_aggregate, aggregate)
LOG.debug("Aggregate %s created." % (aggregate.name))
return aggregate
def _delete_aggregate(self, aggregate):
self.compute_client.aggregates.delete(aggregate.id)
- self.remove_resource(aggregate.id)
LOG.debug("Aggregate %s deleted. " % (aggregate.name))
def _get_host_name(self):
@@ -60,6 +59,7 @@
def _add_host(self, aggregate_name, host):
aggregate = self.compute_client.aggregates.add_host(aggregate_name,
host)
+ self.addCleanup(self._remove_host, aggregate, host)
self.assertIn(host, aggregate.hosts)
LOG.debug("Host %s added to Aggregate %s." % (host, aggregate.name))
@@ -128,6 +128,3 @@
metadata.update(additional_metadata)
self._check_aggregate_details(aggregate, aggregate.name, az, [host],
metadata)
-
- self._remove_host(aggregate, host)
- self._delete_aggregate(aggregate)
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
new file mode 100644
index 0000000..366cd93
--- /dev/null
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.scenario import manager
+from tempest import test
+
+
+class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest):
+
+ """
+ This test is for verifying the functionality of encrypted cinder volumes.
+
+ For both LUKS and cryptsetup encryption types, this test performs
+ the following:
+ * Creates an image in Glance
+ * Boots an instance from the image
+ * Creates an encryption type (as admin)
+ * Creates a volume of that encryption type (as a regular user)
+ * Attaches and detaches the encrypted volume to the instance
+ """
+
+ def launch_instance(self):
+ self.glance_image_create()
+ self.nova_boot()
+
+ def create_encrypted_volume(self, encryption_provider):
+ volume_type = self.create_volume_type(name='luks')
+ self.create_encryption_type(type_id=volume_type.id,
+ provider=encryption_provider,
+ key_size=512,
+ cipher='aes-xts-plain64',
+ control_location='front-end')
+ self.volume = self.create_volume(volume_type=volume_type.name)
+
+ def attach_detach_volume(self):
+ self.nova_volume_attach()
+ self.nova_volume_detach()
+
+ @test.services('compute', 'volume', 'image')
+ def test_encrypted_cinder_volumes_luks(self):
+ self.launch_instance()
+ self.create_encrypted_volume('nova.volume.encryptors.'
+ 'luks.LuksEncryptor')
+ self.attach_detach_volume()
+
+ @test.services('compute', 'volume', 'image')
+ def test_encrypted_cinder_volumes_cryptsetup(self):
+ self.launch_instance()
+ self.create_encrypted_volume('nova.volume.encryptors.'
+ 'cryptsetup.CryptsetupEncryptor')
+ self.attach_detach_volume()
\ No newline at end of file
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index ed5743c..15cf13b 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -60,7 +60,13 @@
# needed because of bug 1199788
self.servers = [x for x in client.servers.list() if name in x.name]
for server in self.servers:
- self.set_resource(server.name, server)
+ # after deleting all servers - wait for all servers to clear
+ # before cleanup continues
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ for server in self.servers:
+ self.addCleanup_with_wait(self.compute_client.servers, server.id)
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 03cfef5..e041fd2 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -14,7 +14,6 @@
# under the License.
-import httplib
import tempfile
import time
import urllib2
@@ -75,15 +74,11 @@
self.server_fixed_ips = {}
self._create_security_group()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def _create_security_group(self):
self.security_group = self._create_security_group_neutron(
tenant_id=self.tenant_id)
self._create_security_group_rules_for_port(self.port1)
self._create_security_group_rules_for_port(self.port2)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
def _create_security_group_rules_for_port(self, port):
rule = {
@@ -100,7 +95,6 @@
def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
@@ -112,14 +106,12 @@
}
server = self.create_server(name=name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
self.servers_keypairs[server.id] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
@@ -150,7 +142,10 @@
private_key=private_key)
# Write a backend's responce into a file
- resp = """HTTP/1.0 200 OK\r\nContent-Length: 8\r\n\r\n%s"""
+ resp = """echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n""" \
+ """Connection: close\r\nContent-Type: text/html; """ \
+ """charset=UTF-8\r\n\r\n%s"; cat >/dev/null"""
+
with tempfile.NamedTemporaryFile() as script:
script.write(resp % server_name)
script.flush()
@@ -158,15 +153,17 @@
key.write(private_key)
key.flush()
commands.copy_file_to_host(script.name,
- "~/script1",
+ "/tmp/script1",
ip,
username, key.name)
+
# Start netcat
- start_server = """sudo nc -ll -p %(port)s -e cat """ \
- """~/%(script)s &"""
+ start_server = """sudo nc -ll -p %(port)s -e sh """ \
+ """/tmp/%(script)s &"""
cmd = start_server % {'port': self.port1,
'script': 'script1'}
ssh_client.exec_command(cmd)
+
if len(self.server_ips) == 1:
with tempfile.NamedTemporaryFile() as script:
script.write(resp % 'server2')
@@ -175,7 +172,7 @@
key.write(private_key)
key.flush()
commands.copy_file_to_host(script.name,
- "~/script2", ip,
+ "/tmp/script2", ip,
username, key.name)
cmd = start_server % {'port': self.port2,
'script': 'script2'}
@@ -207,7 +204,6 @@
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet.id)
- self.addCleanup(self.cleanup_wrapper, self.pool)
self.assertTrue(self.pool)
def _create_members(self):
@@ -223,17 +219,14 @@
member1 = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member1)
member2 = self._create_member(address=ip,
protocol_port=self.port2,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member2)
self.members.extend([member1, member2])
else:
member = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, member)
self.members.append(member)
self.assertTrue(self.members)
@@ -242,7 +235,6 @@
port_id = vip.port_id
floating_ip = self._create_floating_ip(vip, public_network_id,
port_id=port_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(vip.id, [])
self.floating_ips[vip.id].append(floating_ip)
@@ -253,7 +245,6 @@
protocol_port=80,
subnet_id=self.subnet.id,
pool_id=self.pool.id)
- self.addCleanup(self.cleanup_wrapper, self.vip)
self.status_timeout(NeutronRetriever(self.network_client,
self.network_client.vip_path,
net_common.DeletableVip),
@@ -281,19 +272,14 @@
def _send_requests(self, vip_ip, expected, num_req=10):
count = 0
while count < num_req:
- try:
- resp = []
- for i in range(len(self.members)):
- resp.append(
- urllib2.urlopen(
- "http://{0}/".format(vip_ip)).read())
- count += 1
- self.assertEqual(expected,
- set(resp))
- # NOTE: There always is a slim chance of getting this exception
- # due to special aspects of haproxy internal behavior.
- except httplib.BadStatusLine:
- pass
+ resp = []
+ for i in range(len(self.members)):
+ resp.append(
+ urllib2.urlopen(
+ "http://{0}/".format(vip_ip)).read())
+ count += 1
+ self.assertEqual(expected,
+ set(resp))
@test.attr(type='smoke')
@test.services('compute', 'network')
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 0406217..29fdc74 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -85,7 +85,7 @@
def nova_floating_ip_create(self):
self.floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, self.floating_ip)
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f1cd320..431de9a 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -50,23 +50,15 @@
cls.enabled = False
raise cls.skipException(msg)
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkAdvancedServerOps, self).setUp()
key_name = data_utils.rand_name('keypair-smoke-')
self.keypair = self.create_keypair(name=key_name)
- self.addCleanup(self.cleanup_wrapper, self.keypair)
security_group =\
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, security_group)
network = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, network)
router = self._get_router(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, router)
subnet = self._create_subnet(network)
- self.addCleanup(self.cleanup_wrapper, subnet)
subnet.add_to_router(router.id)
public_network_id = CONF.network.public_network_id
create_kwargs = {
@@ -79,10 +71,8 @@
server_name = data_utils.rand_name('server-smoke-%d-')
self.server = self.create_server(name=server_name,
create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, self.server)
self.floating_ip = self._create_floating_ip(self.server,
public_network_id)
- self.addCleanup(self.cleanup_wrapper, self.floating_ip)
def _check_network_connectivity(self, should_connect=True):
username = CONF.compute.image_ssh_user
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c84d4b9..7dc817d 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -96,17 +96,11 @@
raise cls.skipException(msg)
cls.check_preconditions()
- def cleanup_wrapper(self, resource):
- self.cleanup_resource(resource, self.__class__.__name__)
-
def setUp(self):
super(TestNetworkBasicOps, self).setUp()
self.security_group = \
self._create_security_group_neutron(tenant_id=self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.security_group)
self.network, self.subnet, self.router = self._create_networks()
- for r in [self.network, self.router, self.subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
self.check_networks()
self.servers = {}
name = data_utils.rand_name('server-smoke')
@@ -144,7 +138,6 @@
def _create_server(self, name, network):
keypair = self.create_keypair(name='keypair-%s' % name)
- self.addCleanup(self.cleanup_wrapper, keypair)
security_groups = [self.security_group.name]
create_kwargs = {
'nics': [
@@ -154,7 +147,6 @@
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return dict(server=server, keypair=keypair)
def _check_tenant_network_connectivity(self):
@@ -171,7 +163,6 @@
for server in self.servers.keys():
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
@@ -204,11 +195,9 @@
def _create_new_network(self):
self.new_net = self._create_network(self.tenant_id)
- self.addCleanup(self.cleanup_wrapper, self.new_net)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
- self.addCleanup(self.cleanup_wrapper, self.new_subnet)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
@@ -226,7 +215,10 @@
port_id=None,
fixed_ip=None)
# move server to the head of the cleanup list
- self.addCleanup(self.cleanup_wrapper, server)
+ self.addCleanup(self.delete_timeout,
+ self.compute_client.servers,
+ server.id)
+ self.addCleanup(self.delete_wrapper, server)
def check_ports():
port_list = [port for port in
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index dd89dc0..8058b3d 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -165,7 +165,6 @@
def _create_tenant_keypairs(self, tenant_id):
keypair = self.create_keypair(
name=data_utils.rand_name('keypair-smoke-'))
- self.addCleanup(self.cleanup_wrapper, keypair)
self.tenants[tenant_id].keypair = keypair
def _create_tenant_security_groups(self, tenant):
@@ -173,14 +172,12 @@
namestart='secgroup_access-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, access_sg)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
tenant_id=tenant.creds.tenant_id
)
- self.addCleanup(self.cleanup_wrapper, def_sg)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
protocol='tcp',
@@ -188,9 +185,7 @@
port_range_max=22,
direction='ingress',
)
- rule = self._create_security_group_rule(secgroup=access_sg,
- **ssh_rule)
- self.addCleanup(self.cleanup_wrapper, rule)
+ self._create_security_group_rule(secgroup=access_sg, **ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
@@ -238,7 +233,6 @@
'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
- self.addCleanup(self.cleanup_wrapper, server)
return server
def _create_tenant_servers(self, tenant, num=1):
@@ -269,13 +263,10 @@
def _assign_floating_ips(self, server):
public_network_id = CONF.network.public_network_id
floating_ip = self._create_floating_ip(server, public_network_id)
- self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(server, floating_ip)
def _create_tenant_network(self, tenant):
network, subnet, router = self._create_networks(tenant.creds.tenant_id)
- for r in [network, router, subnet]:
- self.addCleanup(self.cleanup_wrapper, r)
tenant.set_network(network, subnet, router)
def _set_compute_context(self, tenant):
@@ -355,11 +346,10 @@
remote_group_id=tenant.security_groups['default'].id,
direction='ingress'
)
- rule = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule)
access_point_ssh = self._connect_to_access_point(tenant)
for server in tenant.servers:
self._check_connectivity(access_point=access_point_ssh,
@@ -385,11 +375,10 @@
protocol='icmp',
direction='ingress'
)
- rule_s2d = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_s2d)
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
@@ -399,11 +388,10 @@
self._test_cross_tenant_block(dest_tenant, source_tenant)
# allow reverse traffic and check
- rule_d2s = self._create_security_group_rule(
+ self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
**ruleset
)
- self.addCleanup(self.cleanup_wrapper, rule_d2s)
access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
ip = self._get_server_ip(source_tenant.access_point,
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 54f1d9e..38686d9 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -74,23 +74,17 @@
'key_name': self.keypair.id,
'security_groups': security_groups
}
- instance = self.create_server(image=self.image_ref,
- flavor=self.flavor_ref,
- create_kwargs=create_kwargs)
- self.set_resource('instance', instance)
-
- def terminate_instance(self):
- instance = self.get_resource('instance')
- instance.delete()
- self.remove_resource('instance')
+ self.instance = self.create_server(image=self.image_ref,
+ flavor=self.flavor_ref,
+ create_kwargs=create_kwargs)
def verify_ssh(self):
if self.run_ssh:
# Obtain a floating IP
floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(self.delete_wrapper, floating_ip)
# Attach a floating IP
- instance = self.get_resource('instance')
- instance.add_floating_ip(floating_ip)
+ self.instance.add_floating_ip(floating_ip)
# Check ssh
try:
self.get_remote_client(
@@ -108,4 +102,4 @@
self.security_group = self._create_security_group_nova()
self.boot_instance()
self.verify_ssh()
- self.terminate_instance()
+ self.instance.delete()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d41490a..7dd662d 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -65,7 +65,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _set_floating_ip_to_server(self, server, floating_ip):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 20561ae..be27024 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -74,7 +74,7 @@
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(floating_ip.delete)
+ self.addCleanup(self.delete_wrapper, floating_ip)
return floating_ip
def _add_floating_ip(self, server, floating_ip):
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 4905dbf..bf5d1f6 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from cinderclient import exceptions as cinder_exc
+
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
@@ -69,7 +71,8 @@
snap = volume_snapshots.create(volume_id=vol_id,
force=True,
display_name=snap_name)
- self.set_resource(snap.id, snap)
+ self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
+ exc_type=cinder_exc.NotFound)
self.status_timeout(volume_snapshots,
snap.id,
'available')
@@ -100,8 +103,7 @@
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
floating_ip = self.compute_client.floating_ips.create()
- fip_name = data_utils.rand_name('scenario-fip')
- self.set_resource(fip_name, floating_ip)
+ self.addCleanup(self.delete_wrapper, floating_ip)
server.add_floating_ip(floating_ip)
ip = floating_ip.ip
else:
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 321b08b..f98ecff 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -122,7 +122,7 @@
return resp, self.deserialize(body)
- def _show_request(self, resource, uuid, permanent=False):
+ def _show_request(self, resource, uuid, permanent=False, **kwargs):
"""
Gets a specific object of the specified type.
@@ -130,7 +130,10 @@
:return: Serialized object as a dictionary.
"""
- uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
+ if 'uri' in kwargs:
+ uri = kwargs['uri']
+ else:
+ uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
resp, body = self.get(uri)
return resp, self.deserialize(body)
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 52479b5..61342eb 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -47,9 +47,9 @@
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
- def list_ports_detail(self):
+ def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
- return self._list_request('/ports/detail')
+ return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
@@ -239,3 +239,19 @@
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
+
+ @base.handle_errors
+ def validate_driver_interface(self, node_uuid):
+ """
+ Validate all driver interfaces of a specific node.
+
+ :param node_uuid: Unique identifier of the node in UUID format.
+
+ """
+
+ uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
+ res='nodes',
+ uuid=node_uuid,
+ postf='validate')
+
+ return self._show_request('nodes', node_uuid, uri=uri)
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index 7616a99..7af904b 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -37,13 +37,16 @@
*args, **kwargs):
# FIXME(andreaf) replace credentials and auth_url with auth_provider
+ insecure_ssl = CONF.identity.disable_ssl_certificate_validation
+
self.connection_timeout = str(CONF.boto.http_socket_timeout)
self.num_retries = str(CONF.boto.num_retries)
self.build_timeout = CONF.boto.build_timeout
self.ks_cred = {"username": username,
"password": password,
"auth_url": auth_url,
- "tenant_name": tenant_name}
+ "tenant_name": tenant_name,
+ "insecure": insecure_ssl}
def _keystone_aws_get(self):
# FIXME(andreaf) Move EC2 credentials to AuthProvider
@@ -90,7 +93,10 @@
self._config_boto_timeout(self.connection_timeout, self.num_retries)
if not all((self.connection_data["aws_access_key_id"],
self.connection_data["aws_secret_access_key"])):
- if all(self.ks_cred.itervalues()):
+ if all([self.ks_cred.get('auth_url'),
+ self.ks_cred.get('username'),
+ self.ks_cred.get('tenant_name'),
+ self.ks_cred.get('password')]):
ec2_cred = self._keystone_aws_get()
self.connection_data["aws_access_key_id"] = \
ec2_cred.access
@@ -109,6 +115,7 @@
def __init__(self, *args, **kwargs):
super(APIClientEC2, self).__init__(*args, **kwargs)
+ insecure_ssl = CONF.identity.disable_ssl_certificate_validation
aws_access = CONF.boto.aws_access
aws_secret = CONF.boto.aws_secret
purl = urlparse.urlparse(CONF.boto.ec2_url)
@@ -129,6 +136,7 @@
self.connection_data = {"aws_access_key_id": aws_access,
"aws_secret_access_key": aws_secret,
"is_secure": purl.scheme == "https",
+ "validate_certs": not insecure_ssl,
"region": region,
"host": purl.hostname,
"port": port,
@@ -187,6 +195,7 @@
def __init__(self, *args, **kwargs):
super(ObjectClientS3, self).__init__(*args, **kwargs)
+ insecure_ssl = CONF.identity.disable_ssl_certificate_validation
aws_access = CONF.boto.aws_access
aws_secret = CONF.boto.aws_secret
purl = urlparse.urlparse(CONF.boto.s3_url)
@@ -201,6 +210,7 @@
self.connection_data = {"aws_access_key_id": aws_access,
"aws_secret_access_key": aws_secret,
"is_secure": purl.scheme == "https",
+ "validate_certs": not insecure_ssl,
"host": purl.hostname,
"port": port,
"calling_format": boto.s3.connection.
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
index 98d8896..4f6602f 100644
--- a/tempest/services/compute/json/agents_client.py
+++ b/tempest/services/compute/json/agents_client.py
@@ -46,7 +46,9 @@
"""Create an agent build."""
post_body = json.dumps({'agent': kwargs})
resp, body = self.post('os-agents', post_body)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(schema.create_agent, resp, body)
+ return resp, body['agent']
def delete_agent(self, agent_id):
"""Delete an existing agent build."""
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 92b4ddf..0028eea 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -137,4 +137,5 @@
post_body = json.dumps({'ip_range': ip_range})
resp, body = self.put('os-floating-ips-bulk/delete', post_body)
body = json.loads(body)
+ self.validate_response(schema.delete_floating_ips_bulk, resp, body)
return resp, body['floating_ips_bulk_delete']
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 23c1e64..69d2f35 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -134,6 +134,7 @@
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
+ self.validate_response(schema.get_server, resp, body)
return resp, body['server']
def delete_server(self, server_id):
@@ -432,8 +433,9 @@
return self.action(server_id, 'shelveOffload', None, **kwargs)
def get_console_output(self, server_id, length):
+ kwargs = {'length': length} if length else {}
return self.action(server_id, 'os-getConsoleOutput', 'output',
- common_schema.get_console_output, length=length)
+ common_schema.get_console_output, **kwargs)
def list_virtual_interfaces(self, server_id):
"""
@@ -523,6 +525,7 @@
"""List the server-groups."""
resp, body = self.get("os-server-groups")
body = json.loads(body)
+ self.validate_response(schema.list_server_groups, resp, body)
return resp, body['server_groups']
def get_server_group(self, server_group_id):
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index 48be54c..31314b7 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -43,7 +43,9 @@
"""Create an agent build."""
post_body = json.dumps({'agent': kwargs})
resp, body = self.post('os-agents', post_body)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(schema.create_agent, resp, body)
+ return resp, body['agent']
def delete_agent(self, agent_id):
"""Delete an existing agent build."""
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 11258a6..d933998 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -136,6 +136,7 @@
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
+ self.validate_response(schema.get_server, resp, body)
return resp, body['server']
def delete_server(self, server_id):
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 626e655..156d889 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -594,8 +594,9 @@
return resp, body
def get_console_output(self, server_id, length):
+ kwargs = {'length': length} if length else {}
return self.action(server_id, 'os-getConsoleOutput', 'output',
- length=length)
+ **kwargs)
def list_virtual_interfaces(self, server_id):
"""
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index e22cd9c..4a7c163 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -22,6 +22,7 @@
from tempest.common import glance_http
from tempest.common import rest_client
+from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -153,6 +154,7 @@
return self._create_with_data(headers, kwargs.get('data'))
resp, body = self.post('v1/images', None, headers)
+ self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['image']
@@ -176,12 +178,15 @@
url = 'v1/images/%s' % image_id
resp, body = self.put(url, data, headers)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['image']
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
- return self.delete(url)
+ resp, body = self.delete(url)
+ self.expected_success(200, resp.status)
+ return resp, body
def image_list(self, **kwargs):
url = 'v1/images'
@@ -190,6 +195,7 @@
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
@@ -210,18 +216,21 @@
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
+ self.expected_success(200, resp.status)
body = self._image_meta_from_headers(resp)
return resp, body
def get_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
return resp, body
def is_resource_deleted(self, id):
@@ -234,12 +243,14 @@
def get_image_membership(self, image_id):
url = 'v1/images/%s/members' % image_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def get_shared_images(self, member_id):
url = 'v1/shared-images/%s' % member_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -249,20 +260,15 @@
if can_share:
body = json.dumps({'member': {'can_share': True}})
resp, __ = self.put(url, body)
+ self.expected_success(204, resp.status)
return resp
def delete_member(self, member_id, image_id):
url = 'v1/images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
+ self.expected_success(204, resp.status)
return resp
- def replace_membership_list(self, image_id, member_list):
- url = 'v1/images/%s/members' % image_id
- body = json.dumps({'membership': member_list})
- resp, data = self.put(url, body)
- data = json.loads(data)
- return resp, data
-
# NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
resp, meta = self.get_image_meta(image_id)
@@ -292,6 +298,9 @@
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index 201869e..c420df9 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -61,6 +61,7 @@
headers = {"Content-Type": "application/openstack-images-v2.0"
"-json-patch"}
resp, body = self.patch('v2/images/%s' % image_id, data, headers)
+ self.expected_success(200, resp.status)
return resp, self._parse_resp(body)
def create_image(self, name, container_format, disk_format, **kwargs):
@@ -81,12 +82,14 @@
self._validate_schema(data)
resp, body = self.post('v2/images', data)
+ self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
def delete_image(self, image_id):
url = 'v2/images/%s' % image_id
- self.delete(url)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp.status)
def image_list(self, params=None):
url = 'v2/images'
@@ -95,6 +98,7 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
self._validate_schema(body, type='images')
return resp, body['images']
@@ -102,6 +106,7 @@
def get_image(self, image_id):
url = 'v2/images/%s' % image_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -117,36 +122,40 @@
headers = {'Content-Type': 'application/octet-stream'}
resp, body = self.http.raw_request('PUT', url, headers=headers,
body=data)
+ self.expected_success(204, resp.status)
return resp, body
def get_image_file(self, image_id):
url = 'v2/images/%s/file' % image_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
return resp, body
def add_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, body = self.put(url, body=None)
+ self.expected_success(204, resp.status)
return resp, body
def delete_image_tag(self, image_id, tag):
url = 'v2/images/%s/tags/%s' % (image_id, tag)
resp, _ = self.delete(url)
+ self.expected_success(204, resp.status)
return resp
def get_image_membership(self, image_id):
url = 'v2/images/%s/members' % image_id
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
- self.expected_success(200, resp)
return resp, body
def add_member(self, image_id, member_id):
url = 'v2/images/%s/members' % image_id
data = json.dumps({'member': member_id})
resp, body = self.post(url, data)
+ self.expected_success(200, resp.status)
body = json.loads(body)
- self.expected_success(200, resp)
return resp, body
def update_member_status(self, image_id, member_id, status):
@@ -154,24 +163,25 @@
url = 'v2/images/%s/members/%s' % (image_id, member_id)
data = json.dumps({'status': status})
resp, body = self.put(url, data)
+ self.expected_success(200, resp.status)
body = json.loads(body)
- self.expected_success(200, resp)
return resp, body
def get_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, body = self.get(url)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
return resp, json.loads(body)
def remove_member(self, image_id, member_id):
url = 'v2/images/%s/members/%s' % (image_id, member_id)
resp, _ = self.delete(url)
- self.expected_success(204, resp)
+ self.expected_success(204, resp.status)
return resp
def get_schema(self, schema):
url = 'v2/schemas/%s' % schema
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 81792c4..4ee8302 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -28,6 +28,7 @@
'vips': 'lb',
'health_monitors': 'lb',
'members': 'lb',
+ 'ipsecpolicies': 'vpn',
'vpnservices': 'vpn',
'ikepolicies': 'vpn',
'ipsecpolicies': 'vpn',
@@ -47,6 +48,7 @@
resource_plural_map = {
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
+ 'ipsecpolicy': 'ipsecpolicies',
'ikepolicy': 'ikepolicies',
'ipsecpolicy': 'ipsecpolicies',
'quotas': 'quotas',
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index f3f4eb6..b2f8205 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -13,7 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import httplib
import urllib
+import urlparse
from tempest.common import http
from tempest.common import rest_client
@@ -143,6 +145,31 @@
resp, body = self.put(url, data)
return resp, body
+ def put_object_with_chunk(self, container, name, contents, chunk_size):
+ """
+ Put an object with Transfer-Encoding header
+ """
+ if self.base_url is None:
+ self._set_auth()
+
+ headers = {'Transfer-Encoding': 'chunked'}
+ if self.token:
+ headers['X-Auth-Token'] = self.token
+
+ conn = put_object_connection(self.base_url, container, name, contents,
+ chunk_size, headers)
+
+ resp = conn.getresponse()
+ body = resp.read()
+
+ resp_headers = {}
+ for header, value in resp.getheaders():
+ resp_headers[header.lower()] = value
+
+ self._error_checker('PUT', None, headers, contents, resp, body)
+
+ return resp.status, resp.reason, resp_headers
+
class ObjectClientCustomizedHeader(rest_client.RestClient):
@@ -220,3 +247,89 @@
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.delete(url, headers=headers)
return resp, body
+
+ def create_object_continue(self, container, object_name,
+ data, metadata=None):
+ """Create storage object."""
+ headers = {}
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
+
+ if not data:
+ headers['content-length'] = '0'
+
+ if self.base_url is None:
+ self._set_auth()
+ headers['X-Auth-Token'] = self.token
+
+ conn = put_object_connection(self.base_url, str(container),
+ str(object_name), data, None, headers)
+
+ response = conn.response_class(conn.sock,
+ strict=conn.strict,
+ method=conn._method)
+ version, status, reason = response._read_status()
+ resp = {'version': version,
+ 'status': str(status),
+ 'reason': reason}
+
+ return resp
+
+
+def put_object_connection(base_url, container, name, contents=None,
+ chunk_size=65536, headers=None, query_string=None):
+ """
+ Helper function to make connection to put object with httplib
+ :param base_url: base_url of an object client
+ :param container: container name that the object is in
+ :param name: object name to put
+ :param contents: a string or a file like object to read object data
+ from; if None, a zero-byte put will be done
+ :param chunk_size: chunk size of data to write; it defaults to 65536;
+ used only if the contents object has a 'read'
+ method, e.g. file-like objects, ignored otherwise
+ :param headers: additional headers to include in the request, if any
+ :param query_string: if set will be appended with '?' to generated path
+ """
+ parsed = urlparse.urlparse(base_url)
+ if parsed.scheme == 'https':
+ conn = httplib.HTTPSConnection(parsed.netloc)
+ else:
+ conn = httplib.HTTPConnection(parsed.netloc)
+ path = str(parsed.path) + "/"
+ path += "%s/%s" % (str(container), str(name))
+
+ if query_string:
+ path += '?' + query_string
+ if headers:
+ headers = dict(headers)
+ else:
+ headers = {}
+ if hasattr(contents, 'read'):
+ conn.putrequest('PUT', path)
+ for header, value in headers.iteritems():
+ conn.putheader(header, value)
+ if 'Content-Length' not in headers:
+ if 'Transfer-Encoding' not in headers:
+ conn.putheader('Transfer-Encoding', 'chunked')
+ conn.endheaders()
+ chunk = contents.read(chunk_size)
+ while chunk:
+ conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
+ chunk = contents.read(chunk_size)
+ conn.send('0\r\n\r\n')
+ else:
+ conn.endheaders()
+ left = headers['Content-Length']
+ while left > 0:
+ size = chunk_size
+ if size > left:
+ size = left
+ chunk = contents.read(size)
+ conn.send(chunk)
+ left -= len(chunk)
+ else:
+ conn.request('PUT', path, contents, headers)
+
+ return conn
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/queuing/json/queuing_client.py
index e5978f5..031c9c6 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/queuing/json/queuing_client.py
@@ -14,11 +14,14 @@
# limitations under the License.
import json
+import urllib
from tempest.api_schema.queuing.v1 import queues as queues_schema
from tempest.common import rest_client
+from tempest.common.utils import data_utils
from tempest import config
+
CONF = config.CONF
@@ -30,11 +33,16 @@
self.version = '1'
self.uri_prefix = 'v{0}'.format(self.version)
+ client_id = data_utils.rand_uuid_hex()
+ self.headers = {'Client-ID': client_id}
+
def list_queues(self):
uri = '{0}/queues'.format(self.uri_prefix)
resp, body = self.get(uri)
- body = json.loads(body)
- self.validate_response(queues_schema.list_queues, resp, body)
+
+ if resp['status'] != '204':
+ body = json.loads(body)
+ self.validate_response(queues_schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
@@ -74,3 +82,80 @@
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=json.dumps(rbody))
return resp, body
+
+ def post_messages(self, queue_name, rbody):
+ uri = '{0}/queues/{1}/messages'.format(self.uri_prefix, queue_name)
+ resp, body = self.post(uri, body=json.dumps(rbody),
+ extra_headers=True,
+ headers=self.headers)
+
+ body = json.loads(body)
+ return resp, body
+
+ def list_messages(self, queue_name):
+ uri = '{0}/queues/{1}/messages?echo=True'.format(self.uri_prefix,
+ queue_name)
+ resp, body = self.get(uri, extra_headers=True, headers=self.headers)
+
+ if resp['status'] != '204':
+ body = json.loads(body)
+ self.validate_response(queues_schema.list_messages, resp, body)
+
+ return resp, body
+
+ def get_single_message(self, message_uri):
+ resp, body = self.get(message_uri, extra_headers=True,
+ headers=self.headers)
+ if resp['status'] != '204':
+ body = json.loads(body)
+ self.validate_response(queues_schema.get_single_message, resp,
+ body)
+ return resp, body
+
+ def get_multiple_messages(self, message_uri):
+ resp, body = self.get(message_uri, extra_headers=True,
+ headers=self.headers)
+
+ if resp['status'] != '204':
+ body = json.loads(body)
+ self.validate_response(queues_schema.get_multiple_messages,
+ resp,
+ body)
+
+ return resp, body
+
+ def delete_messages(self, message_uri):
+ resp, body = self.delete(message_uri)
+ assert(resp['status'] == '204')
+ return resp, body
+
+ def post_claims(self, queue_name, rbody, url_params=False):
+ uri = '{0}/queues/{1}/claims'.format(self.uri_prefix, queue_name)
+ if url_params:
+ uri += '?%s' % urllib.urlencode(url_params)
+
+ resp, body = self.post(uri, body=json.dumps(rbody),
+ extra_headers=True,
+ headers=self.headers)
+
+ body = json.loads(body)
+ self.validate_response(queues_schema.claim_messages, resp, body)
+ return resp, body
+
+ def query_claim(self, claim_uri):
+ resp, body = self.get(claim_uri)
+
+ if resp['status'] != '204':
+ body = json.loads(body)
+ self.validate_response(queues_schema.query_claim, resp, body)
+ return resp, body
+
+ def update_claim(self, claim_uri, rbody):
+ resp, body = self.patch(claim_uri, body=json.dumps(rbody))
+ assert(resp['status'] == '204')
+ return resp, body
+
+ def release_claim(self, claim_uri):
+ resp, body = self.delete(claim_uri)
+ assert(resp['status'] == '204')
+ return resp, body
diff --git a/tempest/test.py b/tempest/test.py
index 650fad7..afe7a96 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -108,6 +108,7 @@
'object_storage': CONF.service_available.swift,
'dashboard': CONF.service_available.horizon,
'ceilometer': CONF.service_available.ceilometer,
+ 'data_processing': CONF.service_available.sahara
}
def decorator(f):
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
index bdb9269..1e2925b 100644
--- a/tempest/tests/test_commands.py
+++ b/tempest/tests/test_commands.py
@@ -47,7 +47,8 @@
@mock.patch('subprocess.Popen')
def test_iptables_raw(self, mock):
table = 'filter'
- expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+ expected = ['/usr/bin/sudo', '-n', 'iptables', '--line-numbers',
+ '-L', '-nv', '-t',
'%s' % table]
commands.iptables_raw(table)
mock.assert_called_once_with(expected, **self.subprocess_args)
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index ab81836..52fdf7e 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -70,9 +70,15 @@
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/api/compute/test_fake.py',
"@test.services('image')"))
+ self.assertFalse(checks.scenario_tests_need_service_tags(
+ 'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
+ "@test.services('compute')"))
self.assertTrue(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/scenario/test_fake.py',
'\n'))
+ self.assertTrue(checks.scenario_tests_need_service_tags(
+ 'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
+ "\n"))
def test_no_vi_headers(self):
# NOTE(mtreinish) The lines parameter is used only for finding the
@@ -93,3 +99,11 @@
'./tempest/scenario/compute/fake_test.py'))
self.assertFalse(checks.service_tags_not_in_module_path(
"@test.services('compute')", './tempest/api/image/fake_test.py'))
+
+ def test_no_official_client_manager_in_api_tests(self):
+ self.assertTrue(checks.no_official_client_manager_in_api_tests(
+ "cls.official_client = clients.OfficialClientManager(credentials)",
+ "tempest/api/compute/base.py"))
+ self.assertFalse(checks.no_official_client_manager_in_api_tests(
+ "cls.official_client = clients.OfficialClientManager(credentials)",
+ "tempest/scenario/fake_test.py"))
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index 0da52dc..27cd6b5 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -36,13 +36,13 @@
rsa_mock.assert_called_once_with(mock.sentinel.csio)
cs_mock.assert_called_once_with('mykey')
rsa_mock.reset_mock()
- cs_mock.rest_mock()
+ cs_mock.reset_mock()
pkey = mock.sentinel.pkey
# Shouldn't call out to load a file from RSAKey, since
# a sentinel isn't a basestring...
ssh.Client('localhost', 'root', pkey=pkey)
- rsa_mock.assert_not_called()
- cs_mock.assert_not_called()
+ self.assertEqual(0, rsa_mock.call_count)
+ self.assertEqual(0, cs_mock.call_count)
def _set_ssh_connection_mocks(self):
client_mock = mock.MagicMock()
@@ -75,7 +75,7 @@
password=None
)]
self.assertEqual(expected_connect, client_mock.connect.mock_calls)
- s_mock.assert_not_called()
+ self.assertEqual(0, s_mock.call_count)
def test_get_ssh_connection_two_attemps(self):
c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
@@ -90,8 +90,8 @@
start_time = int(time.time())
client._get_ssh_connection(sleep=1)
end_time = int(time.time())
- self.assertTrue((end_time - start_time) < 3)
- self.assertTrue((end_time - start_time) > 1)
+ self.assertLess((end_time - start_time), 4)
+ self.assertGreater((end_time - start_time), 1)
def test_get_ssh_connection_timeout(self):
c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
@@ -108,8 +108,8 @@
with testtools.ExpectedException(exceptions.SSHTimeout):
client._get_ssh_connection()
end_time = int(time.time())
- self.assertTrue((end_time - start_time) < 4)
- self.assertTrue((end_time - start_time) >= 2)
+ self.assertLess((end_time - start_time), 5)
+ self.assertGreaterEqual((end_time - start_time), 2)
def test_exec_command(self):
gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index 485beff..bbc3d15 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -42,6 +42,8 @@
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(http.ClosingHttp, 'request',
fake_identity._fake_v2_response)
+ cfg.CONF.set_default('operator_role', 'FakeRole',
+ group='object-storage')
def test_tempest_client(self):
iso_creds = isolated_creds.IsolatedCreds('test class')
@@ -92,6 +94,31 @@
{'id': id, 'name': name})))
return tenant_fix
+ def _mock_list_roles(self, id, name):
+ roles_fix = self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': id, 'name': name},
+ {'id': '1', 'name': 'FakeRole'}])))
+ return roles_fix
+
+ def _mock_assign_user_role(self):
+ tenant_fix = self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'assign_user_role',
+ return_value=({'status': 200},
+ {})))
+ return tenant_fix
+
+ def _mock_list_role(self):
+ roles_fix = self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': '1', 'name': 'FakeRole'}])))
+ return roles_fix
+
def _mock_network_create(self, iso_creds, id, name):
net_fix = self.useFixture(mockpatch.PatchObject(
iso_creds.network_admin_client,
@@ -121,6 +148,8 @@
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
primary_creds = iso_creds.get_primary_creds()
@@ -135,13 +164,9 @@
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_list_roles('1234', 'admin')
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
- self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClientJSON,
- 'list_roles',
- return_value=({'status': 200},
- [{'id': '1234', 'name': 'admin'}])))
user_mock = mock.patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role')
@@ -150,7 +175,9 @@
with mock.patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role') as user_mock:
admin_creds = iso_creds.get_admin_creds()
- user_mock.assert_called_once_with('1234', '1234', '1234')
+ user_mock.assert_has_calls([
+ mock.call('1234', '1234', '1'),
+ mock.call('1234', '1234', '1234')])
self.assertEqual(admin_creds.username, 'fake_admin_user')
self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
# Verify IDs
@@ -162,6 +189,8 @@
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
+ roles_fix = self._mock_list_role()
tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
user_fix = self._mock_user_create('1234', 'fake_prim_user')
iso_creds.get_primary_creds()
@@ -172,16 +201,11 @@
iso_creds.get_alt_creds()
tenant_fix.cleanUp()
user_fix.cleanUp()
+ roles_fix.cleanUp()
tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
user_fix = self._mock_user_create('123456', 'fake_admin_user')
- self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClientJSON,
- 'list_roles',
- return_value=({'status': 200},
- [{'id': '123456', 'name': 'admin'}])))
- with mock.patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- iso_creds.get_admin_creds()
+ self._mock_list_roles('123456', 'admin')
+ iso_creds.get_admin_creds()
user_mock = self.patch(
'tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
@@ -209,6 +233,8 @@
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
alt_creds = iso_creds.get_alt_creds()
@@ -222,6 +248,8 @@
def test_network_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_net')
@@ -247,6 +275,8 @@
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
# Create primary tenant and network
+ self._mock_assign_user_role()
+ roles_fix = self._mock_list_role()
user_fix = self._mock_user_create('1234', 'fake_prim_user')
tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
net_fix = self._mock_network_create(iso_creds, '1234', 'fake_net')
@@ -278,6 +308,7 @@
net_fix.cleanUp()
subnet_fix.cleanUp()
router_fix.cleanUp()
+ roles_fix.cleanUp()
# Create admin tenant and networks
user_fix = self._mock_user_create('123456', 'fake_admin_user')
tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
@@ -286,14 +317,8 @@
subnet_fix = self._mock_subnet_create(iso_creds, '123456',
'fake_admin_subnet')
router_fix = self._mock_router_create('123456', 'fake_admin_router')
- self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClientJSON,
- 'list_roles',
- return_value=({'status': 200},
- [{'id': '123456', 'name': 'admin'}])))
- with mock.patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- iso_creds.get_admin_creds()
+ self._mock_list_roles('123456', 'admin')
+ iso_creds.get_admin_creds()
self.patch('tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
self.patch('tempest.services.identity.json.identity_client.'
@@ -348,6 +373,8 @@
def test_network_alt_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_alt_net')
@@ -372,6 +399,7 @@
def test_network_admin_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
+ self._mock_assign_user_role()
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_admin_net')
@@ -380,14 +408,8 @@
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
- self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClientJSON,
- 'list_roles',
- return_value=({'status': 200},
- [{'id': '123456', 'name': 'admin'}])))
- with mock.patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- iso_creds.get_admin_creds()
+ self._mock_list_roles('123456', 'admin')
+ iso_creds.get_admin_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_admin_network()
subnet = iso_creds.get_admin_subnet()
@@ -410,6 +432,8 @@
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = mock.patch.object(iso_creds.network_admin_client,
@@ -444,6 +468,8 @@
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
@@ -460,6 +486,8 @@
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
@@ -476,6 +504,8 @@
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
+ self._mock_assign_user_role()
+ self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
diff --git a/test-requirements.txt b/test-requirements.txt
index 215f28b..13ef291 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -8,3 +8,4 @@
mock>=1.0
coverage>=3.6
oslotest
+stevedore>=0.14
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
index 528bd5b..0f0d77e 100755
--- a/tools/config/check_uptodate.sh
+++ b/tools/config/check_uptodate.sh
@@ -16,6 +16,10 @@
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+if [ $? != 0 ]
+then
+ exit 1
+fi
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
index 20ddfbb..d22b2f0 100755
--- a/tools/config/generate_sample.sh
+++ b/tools/config/generate_sample.sh
@@ -1,5 +1,15 @@
#!/usr/bin/env bash
+# Generate sample configuration for your project.
+#
+# Aside from the command line flags, it also respects a config file which
+# should be named oslo.config.generator.rc and be placed in the same directory.
+#
+# You can then export the following variables:
+# TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES: list of modules to interrogate for options.
+# TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES: list of libraries to discover.
+# TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES: list of files to remove from automatic listing.
+
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
@@ -95,6 +105,10 @@
source "$RC_FILE"
fi
+for filename in ${TEMPEST_CONFIG_GENERATOR_EXCLUDED_FILES}; do
+ FILES="${FILES[@]/$filename/}"
+done
+
for mod in ${TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
@@ -111,6 +125,11 @@
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
+if [ $? != 0 ]
+then
+ echo "Can not generate $OUTPUTFILE"
+ exit 1
+fi
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc
new file mode 100644
index 0000000..303e156
--- /dev/null
+++ b/tools/config/oslo.config.generator.rc
@@ -0,0 +1 @@
+MODULEPATH=tempest.common.generate_sample_tempest
diff --git a/tools/generate_sample.sh b/tools/generate_sample.sh
deleted file mode 100755
index 9b312c9..0000000
--- a/tools/generate_sample.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-MODULEPATH=tempest.common.generate_sample_tempest tools/config/generate_sample.sh $@
diff --git a/tox.ini b/tox.ini
index c1acde9..7f69fad 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = pep8
+envlist = pep8,py27
minversion = 1.6
skipsdist = True
@@ -85,7 +85,6 @@
-r{toxinidir}/test-requirements.txt
[testenv:pep8]
-setenv = MODULEPATH=tempest.common.generate_sample_tempest
commands =
flake8 {posargs}
{toxinidir}/tools/config/check_uptodate.sh