Merge "ssh_floating verify reboot"
diff --git a/requirements.txt b/requirements.txt
index fe3e5e5..5e396c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@
paramiko>=1.9.0
netaddr>=0.7.6
python-glanceclient>=0.9.0
-python-keystoneclient>=0.7.0
+python-keystoneclient>=0.8.0
python-novaclient>=2.17.0
python-neutronclient>=2.3.4,<3
python-cinderclient>=1.0.6
diff --git a/setup.cfg b/setup.cfg
index a701572..f4aa3e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,10 @@
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
+[entry_points]
+console_scripts =
+ verify-tempest-config = tempest.cmd.verify_tempest_config:main
+
[build_sphinx]
all_files = 1
build-dir = doc/build
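
The new [entry_points] section asks pbr/setuptools to generate a verify-tempest-config console script that imports and calls tempest.cmd.verify_tempest_config:main. A minimal sketch of the kind of zero-argument callable this wiring expects (illustrative only, not the actual verify_tempest_config implementation):

# Hypothetical stand-in for tempest/cmd/verify_tempest_config.py.
# setuptools generates a wrapper script that imports this module and
# calls main() with no arguments, using its return value as exit code.
import sys


def main():
    print("checking the tempest configuration against the cloud...")
    return 0


if __name__ == '__main__':
    sys.exit(main())
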
diff --git a/setup.py b/setup.py
index 70c2b3f..7363757 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,14 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index a65d7b7..edeb2fc 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -30,10 +30,12 @@
class BaseComputeTest(tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
+ _api_version = 3
force_tenant_isolation = False
@classmethod
def setUpClass(cls):
+ cls.set_network_resources()
super(BaseComputeTest, cls).setUpClass()
os = cls.get_client_manager()
@@ -53,6 +55,57 @@
cls.multi_user = cls.get_multi_user()
cls.security_groups = []
+ if cls._api_version == 2:
+ cls.servers_client = cls.os.servers_client
+ cls.flavors_client = cls.os.flavors_client
+ cls.images_client = cls.os.images_client
+ cls.extensions_client = cls.os.extensions_client
+ cls.floating_ips_client = cls.os.floating_ips_client
+ cls.keypairs_client = cls.os.keypairs_client
+ cls.security_groups_client = cls.os.security_groups_client
+ cls.quotas_client = cls.os.quotas_client
+ cls.limits_client = cls.os.limits_client
+ cls.volumes_extensions_client = cls.os.volumes_extensions_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.interfaces_client = cls.os.interfaces_client
+ cls.fixed_ips_client = cls.os.fixed_ips_client
+ cls.availability_zone_client = cls.os.availability_zone_client
+ cls.agents_client = cls.os.agents_client
+ cls.aggregates_client = cls.os.aggregates_client
+ cls.services_client = cls.os.services_client
+ cls.instance_usages_audit_log_client = \
+ cls.os.instance_usages_audit_log_client
+ cls.hypervisor_client = cls.os.hypervisor_client
+ cls.certificates_client = cls.os.certificates_client
+ cls.migrations_client = cls.os.migrations_client
+
+ elif cls._api_version == 3:
+ if not CONF.compute_feature_enabled.api_v3:
+ skip_msg = ("%s skipped as nova v3 api is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ cls.servers_client = cls.os.servers_v3_client
+ cls.images_client = cls.os.image_client
+ cls.flavors_client = cls.os.flavors_v3_client
+ cls.services_client = cls.os.services_v3_client
+ cls.extensions_client = cls.os.extensions_v3_client
+ cls.availability_zone_client = cls.os.availability_zone_v3_client
+ cls.interfaces_client = cls.os.interfaces_v3_client
+ cls.hypervisor_client = cls.os.hypervisor_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.certificates_client = cls.os.certificates_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.aggregates_client = cls.os.aggregates_v3_client
+ cls.hosts_client = cls.os.hosts_v3_client
+ cls.quotas_client = cls.os.quotas_v3_client
+ cls.version_client = cls.os.version_v3_client
+ cls.migrations_client = cls.os.migrations_v3_client
+ else:
+ msg = ("Unexpected API version is specified (%s)" %
+ cls._api_version)
+ raise exceptions.InvalidConfiguration(message=msg)
+
@classmethod
def get_multi_user(cls):
multi_user = True
@@ -91,6 +144,26 @@
pass
@classmethod
+ def server_check_teardown(cls):
+ """Checks is the shared server clean enough for subsequent test.
+ Method will delete the server when it's dirty.
+ The setUp method is responsible for creating a new server.
+ Exceptions raised in tearDown class are fails the test case,
+ This method supposed to use only by tierDown methods, when
+ the shared server_id is stored in the server_id of the class.
+ """
+ if getattr(cls, 'server_id', None) is not None:
+ try:
+ cls.servers_client.wait_for_server_status(cls.server_id,
+ 'ACTIVE')
+ except Exception as exc:
+ LOG.exception(exc)
+ cls.servers_client.delete_server(cls.server_id)
+ cls.servers_client.wait_for_server_termination(cls.server_id)
+ cls.server_id = None
+ raise
+
+ @classmethod
def clear_images(cls):
for image_id in cls.images:
try:
@@ -210,39 +283,6 @@
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
-
-class BaseV2ComputeTest(BaseComputeTest):
-
- _interface = "json"
-
- @classmethod
- def setUpClass(cls):
- # By default compute tests do not create network resources
- cls.set_network_resources()
- super(BaseV2ComputeTest, cls).setUpClass()
- cls.servers_client = cls.os.servers_client
- cls.flavors_client = cls.os.flavors_client
- cls.images_client = cls.os.images_client
- cls.extensions_client = cls.os.extensions_client
- cls.floating_ips_client = cls.os.floating_ips_client
- cls.keypairs_client = cls.os.keypairs_client
- cls.security_groups_client = cls.os.security_groups_client
- cls.quotas_client = cls.os.quotas_client
- cls.limits_client = cls.os.limits_client
- cls.volumes_extensions_client = cls.os.volumes_extensions_client
- cls.volumes_client = cls.os.volumes_client
- cls.interfaces_client = cls.os.interfaces_client
- cls.fixed_ips_client = cls.os.fixed_ips_client
- cls.availability_zone_client = cls.os.availability_zone_client
- cls.agents_client = cls.os.agents_client
- cls.aggregates_client = cls.os.aggregates_client
- cls.services_client = cls.os.services_client
- cls.instance_usages_audit_log_client = \
- cls.os.instance_usages_audit_log_client
- cls.hypervisor_client = cls.os.hypervisor_client
- cls.certificates_client = cls.os.certificates_client
- cls.migrations_client = cls.os.migrations_client
-
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
"""Wrapper utility that returns an image created from the server."""
@@ -250,21 +290,25 @@
if 'name' in kwargs:
name = kwargs.pop('name')
- resp, image = cls.images_client.create_image(
- server_id, name)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.create_image(server_id, name)
+ elif cls._api_version == 3:
+ resp, image = cls.servers_client.create_image(server_id, name)
image_id = data_utils.parse_image_id(resp['location'])
cls.images.append(image_id)
if 'wait_until' in kwargs:
cls.images_client.wait_for_image_status(image_id,
kwargs['wait_until'])
- resp, image = cls.images_client.get_image(image_id)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.get_image(image_id)
+ elif cls._api_version == 3:
+ resp, image = cls.images_client.get_image_meta(image_id)
if kwargs['wait_until'] == 'ACTIVE':
if kwargs.get('wait_for_server', True):
cls.servers_client.wait_for_server_status(server_id,
'ACTIVE')
-
return resp, image
@classmethod
@@ -278,13 +322,24 @@
LOG.exception('Failed to delete server %s' % server_id)
pass
resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['adminPass']
+ if cls._api_version == 2:
+ cls.password = server['adminPass']
+ elif cls._api_version == 3:
+ cls.password = server['admin_password']
return server['id']
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ if cls._api_version == 2:
+ cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ elif cls._api_version == 3:
+ cls._delete_volume(cls.volumes_client, volume_id)
+
+
+class BaseV2ComputeTest(BaseComputeTest):
+ _api_version = 2
+ _interface = "json"
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
@@ -313,75 +368,9 @@
class BaseV3ComputeTest(BaseComputeTest):
-
+ _api_version = 3
_interface = "json"
- @classmethod
- def setUpClass(cls):
- # By default compute tests do not create network resources
- if not CONF.compute_feature_enabled.api_v3:
- skip_msg = ("%s skipped as nova v3 api is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- cls.set_network_resources()
- super(BaseV3ComputeTest, cls).setUpClass()
-
- cls.servers_client = cls.os.servers_v3_client
- cls.images_client = cls.os.image_client
- cls.flavors_client = cls.os.flavors_v3_client
- cls.services_client = cls.os.services_v3_client
- cls.extensions_client = cls.os.extensions_v3_client
- cls.availability_zone_client = cls.os.availability_zone_v3_client
- cls.interfaces_client = cls.os.interfaces_v3_client
- cls.hypervisor_client = cls.os.hypervisor_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.volumes_client = cls.os.volumes_client
- cls.certificates_client = cls.os.certificates_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.aggregates_client = cls.os.aggregates_v3_client
- cls.hosts_client = cls.os.hosts_v3_client
- cls.quotas_client = cls.os.quotas_v3_client
- cls.version_client = cls.os.version_v3_client
- cls.migrations_client = cls.os.migrations_v3_client
-
- @classmethod
- def create_image_from_server(cls, server_id, **kwargs):
- """Wrapper utility that returns an image created from the server."""
- name = data_utils.rand_name(cls.__name__ + "-image")
- if 'name' in kwargs:
- name = kwargs.pop('name')
-
- resp, image = cls.servers_client.create_image(
- server_id, name)
- image_id = data_utils.parse_image_id(resp['location'])
- cls.images.append(image_id)
-
- if 'wait_until' in kwargs:
- cls.images_client.wait_for_image_status(image_id,
- kwargs['wait_until'])
- resp, image = cls.images_client.get_image_meta(image_id)
-
- return resp, image
-
- @classmethod
- def rebuild_server(cls, server_id, **kwargs):
- # Destroy an existing server and creates a new one
- try:
- cls.servers_client.delete_server(server_id)
- cls.servers_client.wait_for_server_termination(server_id)
- except Exception:
- LOG.exception('Failed to delete server %s' % server_id)
- pass
- resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['admin_password']
- return server['id']
-
- @classmethod
- def delete_volume(cls, volume_id):
- """Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_client, volume_id)
-
class BaseV3ComputeAdminTest(BaseV3ComputeTest):
"""Base test case class for all Compute Admin API V3 tests."""
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index d2fd970..c81cec5 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -26,6 +26,11 @@
class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerTestJSON, self).tearDown()
+
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 41a0590..9c4ab00 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeTestJSON, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 7b12555..cf9837f 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,15 +17,15 @@
from tempest import test
-class AZTestJSON(base.BaseV2ComputeTest):
-
+class AZV3Test(base.BaseComputeTest):
"""
Tests Availability Zone API List
"""
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(AZTestJSON, cls).setUpClass()
+ super(AZV3Test, cls).setUpClass()
cls.client = cls.availability_zone_client
@test.attr(type='gate')
@@ -36,5 +36,9 @@
self.assertTrue(len(availability_zone) > 0)
-class AZTestXML(AZTestJSON):
+class AZV2TestJSON(AZV3Test):
+ _api_version = 2
+
+
+class AZV2TestXML(AZV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 9d6a1c1..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['adminPass'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -117,25 +110,8 @@
def setUpClass(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- cls.client = cls.servers_client
cls.flavor_client = cls.os_adm.flavors_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- accessIPv4=cls.accessIPv4,
- accessIPv6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['adminPass']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+ cls.client = cls.servers_client
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -143,7 +119,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 64
@@ -156,12 +132,12 @@
ram, vcpus, disk,
flavor_with_eph_disk_id,
ephemeral=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -174,18 +150,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 202)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -198,13 +174,18 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_emph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
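
The partition-count change above matters because RemoteClient.get_partitions() returns the raw command output as a string; len() of that string counts characters, so the old partition_num + 1 comparison could never hold. Counting lines instead gives a usable number. A small illustration with made-up sample output (the real text comes from the guest over SSH):

# Made-up /proc/partitions-style output; only the line count matters here.
sample = ("major minor  #blocks  name\n"
          "   8        0   20971520 vda\n"
          "   8       16    1048576 vdb")

print(len(sample))              # character count of the string (old behaviour)
print(len(sample.split('\n')))  # line count, comparable across servers (new)
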
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 1f2bca9..80e6008 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -42,6 +42,12 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
cls.prepare_instance_network()
@@ -126,7 +132,6 @@
metadata=meta,
personality=personality,
adminPass=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -146,6 +151,8 @@
linux_client = remote_client.RemoteClient(server, self.ssh_user,
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
@@ -158,11 +165,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -176,6 +179,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+ # Restore the server to the original image (tearDown will verify it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ddfc1d5..b7e4e38 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -60,25 +60,6 @@
resp, server = self.create_test_server(personality=person)
self.assertEqual('202', resp['status'])
- @test.attr(type='gate')
- def test_create_server_with_existent_personality_file(self):
- # Any existing file that match specified file will be renamed to
- # include the bak extension appended with a time stamp
-
- # TODO(zhikunliu): will add validations when ssh instance validation
- # re-factor is ready
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- resp, server = self.create_test_server(personality=personality,
- wait_until="ACTIVE")
- resp, image = self.create_image_from_server(server['id'],
- wait_until="ACTIVE")
- resp, server = self.create_test_server(image_id=image['id'],
- personality=personality,
- wait_until="ACTIVE")
- self.assertEqual('202', resp['status'])
-
class ServerPersonalityTestXML(ServerPersonalityTestJSON):
_interface = "xml"
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index cc801b5..5ac667e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeTestJSON, cls).setUpClass()
@@ -133,12 +137,11 @@
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -350,13 +353,12 @@
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@@ -426,7 +428,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -448,6 +449,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
diff --git a/tempest/api/compute/v2/__init__.py b/tempest/api/compute/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/api/compute/v2/__init__.py
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 3aab1e1..795437b 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -41,6 +41,11 @@
# Usually it means the server had a serious accident
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ImagesOneServerV3Test, cls).setUpClass()
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index 7679eee..eed81c6 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeV3Test, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/v3/servers/test_availability_zone.py b/tempest/api/compute/v3/servers/test_availability_zone.py
deleted file mode 100644
index 5a1e07e..0000000
--- a/tempest/api/compute/v3/servers/test_availability_zone.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class AZV3Test(base.BaseV3ComputeTest):
-
- """
- Tests Availability Zone API List
- """
-
- @classmethod
- def setUpClass(cls):
- super(AZV3Test, cls).setUpClass()
- cls.client = cls.availability_zone_client
-
- @test.attr(type='gate')
- def test_get_availability_zone_list_with_non_admin_user(self):
- # List of availability zone with non-administrator user
- resp, availability_zone = self.client.get_availability_zone_list()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 68b4b9d..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['admin_password'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4,
@@ -118,25 +111,8 @@
def setUpClass(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
cls.flavor_client = cls.flavors_admin_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- access_ip_v4=cls.accessIPv4,
- access_ip_v6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['admin_password']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -144,7 +120,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 512
@@ -156,13 +132,13 @@
create_flavor(flavor_with_eph_disk_name,
ram, vcpus, disk,
flavor_with_eph_disk_id,
- ephemeral=1, swap=1024, rxtx=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ ephemeral=1, rxtx=1))
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -175,18 +151,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
- self.assertEqual(resp.status, 202)
+ self.assertEqual(resp.status, 204)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -199,13 +175,17 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
-
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_emph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_emph)
class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index 1495cb7..5b35e2a 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -39,6 +39,12 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
cls.prepare_instance_network()
@@ -117,7 +123,6 @@
name=new_name,
metadata=meta,
admin_password=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -138,6 +143,9 @@
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
+
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
@@ -149,11 +157,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -167,6 +171,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+ # Restore the server to the original image (tearDown will verify it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index c1d1935..827c4c4 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeV3Test, cls).setUpClass()
@@ -121,12 +125,11 @@
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -330,13 +333,12 @@
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
@@ -404,7 +406,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -425,6 +426,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 0647069..555cbda 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -14,9 +14,12 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class FWaaSExtensionTestJSON(base.BaseNetworkTest):
_interface = 'json'
@@ -67,6 +70,20 @@
except exceptions.NotFound:
pass
+ self.client.wait_for_resource_deletion('firewall', fw_id)
+
+ def _wait_for_active(self, fw_id):
+ def _wait():
+ resp, firewall = self.client.show_firewall(fw_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+ return firewall['status'] == 'ACTIVE'
+
+ if not test.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+ raise exceptions.TimeoutException(m)
+
@test.attr(type='smoke')
def test_list_firewall_rules(self):
# List firewall rules
@@ -168,6 +185,15 @@
@test.attr(type='smoke')
def test_create_show_delete_firewall(self):
+ # Create tenant network resources required for an ACTIVE firewall
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ router = self.create_router(
+ data_utils.rand_name('router-'),
+ admin_state_up=True)
+ self.client.add_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
+
# Create firewall
resp, body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
@@ -177,11 +203,16 @@
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
+ self._wait_for_active(firewall_id)
+
# show a created firewall
resp, firewall = self.client.show_firewall(firewall_id)
self.assertEqual('200', resp['status'])
firewall = firewall['firewall']
+
for key, value in firewall.iteritems():
+ if key == 'status':
+ continue
self.assertEqual(created_firewall[key], value)
# list firewall
@@ -198,9 +229,6 @@
# Delete firewall
resp, _ = self.client.delete_firewall(firewall_id)
self.assertEqual('204', resp['status'])
- # Confirm deletion
- # TODO(raies): Confirm deletion can be done only when,
- # deleted firewall status is not "PENDING_DELETE".
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
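
The _wait_for_active helper added above polls show_firewall through test.call_until_true until the firewall reports ACTIVE, bounded by the network build timeout and interval. A standalone sketch of the same polling pattern, assuming call_until_true(predicate, duration, sleep_for) semantics; it is not the tempest implementation:

import time


def call_until_true(func, duration, sleep_for):
    # Assumed semantics: call func every sleep_for seconds until it
    # returns True or duration seconds have elapsed.
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False


def wait_for_status(show, resource_id, wanted='ACTIVE', timeout=300, interval=1):
    # Generic form of the _wait_for_active helper: show() is any callable
    # returning a dict with a 'status' key, e.g. a thin client wrapper.
    if not call_until_true(lambda: show(resource_id)['status'] == wanted,
                           timeout, interval):
        raise RuntimeError('%s never reached status %s' % (resource_id, wanted))
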
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index d919245..c1f468b 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
class AccountQuotasTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -101,7 +102,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasTest, cls).tearDownClass()
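
The @test.safe_setup decorator added to these setUpClass methods is why tearDownClass now guards on hasattr(cls, "container_name"): if class setup fails part-way, the class teardown still runs, and it must tolerate attributes that were never created. A rough sketch of what such a decorator does (not the actual tempest.test.safe_setup code):

import functools


def safe_setup(f):
    # Wraps a setUpClass-style classmethod body: if setup raises, run the
    # class teardown so partially created resources are released, then
    # re-raise so the failure is still reported.
    @functools.wraps(f)
    def wrapper(cls):
        try:
            f(cls)
        except Exception:
            cls.tearDownClass()
            raise
    return wrapper
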
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 5a79529..4677f97 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
class AccountQuotasNegativeTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -100,7 +101,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasNegativeTest, cls).tearDownClass()
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..7fb0604 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
class AccountTest(base.BaseObjectTest):
+
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
- cls.containers = []
for i in moves.xrange(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
class StaticWebTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(StaticWebTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(StaticWebTest, cls).tearDownClass()
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..6bda83b 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
class ContainerSyncTest(base.BaseObjectTest):
+ clients = {}
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ContainerSyncTest, cls).setUpClass()
cls.containers = []
@@ -50,7 +52,6 @@
int(container_sync_timeout / cls.container_sync_interval)
# define container and object clients
- cls.clients = {}
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index 81db252..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
class ObjectFormPostTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index fe0c994..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -26,7 +26,11 @@
class ObjectFormPostNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectTempUrlNegativeTest, cls).setUpClass()
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index c27bedf..4e40de9 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,7 +30,7 @@
@classmethod
def setUpClass(cls):
super(BaseOrchestrationTest, cls).setUpClass()
- cls.os = clients.OrchestrationManager()
+ cls.os = clients.Manager()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
cls.build_timeout = CONF.orchestration.build_timeout
@@ -41,6 +41,7 @@
cls.servers_client = cls.os.servers_client
cls.keypairs_client = cls.os.keypairs_client
cls.network_client = cls.os.network_client
+ cls.volumes_client = cls.os.volumes_client
cls.stacks = []
cls.keypairs = []
@@ -136,3 +137,8 @@
return dict((r['resource_name'], r['resource_type'])
for r in resources)
+
+ def get_stack_output(self, stack_identifier, output_key):
+ resp, body = self.client.get_stack(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ return self.stack_output(body, output_key)
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..08e3da4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,25 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ deletion_policy: 'Retain'
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 275d040..63b03f4 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -36,7 +36,6 @@
admin_state_up: false
external_gateway_info:
network: {get_param: ExternalNetworkId}
- enable_snat: false
RouterInterface:
type: OS::Neutron::RouterInterface
properties:
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index b96f6ce..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -137,8 +137,6 @@
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
- self.assertEqual(False,
- router['external_gateway_info']['enable_snat'])
self.assertEqual(False, router['admin_state_up'])
@test.attr(type='slow')
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index cb5d941..4b845b1 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -110,14 +110,6 @@
# wait for create to complete.
self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
# This is an assert of great significance, as it means the following
# has happened:
# - cfn-init read the provided metadata and wrote out a file
@@ -125,5 +117,5 @@
# - a cfn-signal was built which was signed with provided credentials
# - the wait condition was fulfilled and the stack has changed state
wait_status = json.loads(
- self.stack_output(body, 'WaitConditionStatus'))
+ self.get_stack_output(sid, 'WaitConditionStatus'))
self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..2544c41
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(CinderResourcesTest, cls).setUpClass()
+ if not CONF.service_available.cinder:
+ raise cls.skipException('Cinder support is required')
+
+ def _cinder_verify(self, volume_id):
+ self.assertIsNotNone(volume_id)
+ resp, volume = self.volumes_client.get_volume(volume_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('available', volume.get('status'))
+ self.assertEqual(1, volume.get('size'))
+ self.assertEqual('a descriptive description',
+ volume.get('display_description'))
+
+ def _outputs_verify(self, stack_identifier):
+ self.assertEqual('available',
+ self.get_stack_output(stack_identifier, 'status'))
+ self.assertEqual('1',
+ self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual('a descriptive description',
+ self.get_stack_output(stack_identifier,
+ 'display_description'))
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete(self):
+ """Create and delete a volume via OS::Cinder::Volume."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self.assertRaises(exceptions.NotFound,
+ self.volumes_client.get_volume,
+ volume_id)
+
+ def _cleanup_volume(self, volume_id):
+ """Cleanup the volume direct with cinder."""
+ resp = self.volumes_client.delete_volume(volume_id)
+ self.assertEqual(202, resp[0].status)
+ self.volumes_client.wait_for_resource_deletion(volume_id)
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete_retain(self):
+ """Ensure the 'Retain' deletion policy is respected."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic_delete_retain')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self.addCleanup(self._cleanup_volume, volume_id)
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is *not* gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self._cinder_verify(volume_id)
+
+ # Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
new file mode 100644
index 0000000..012c231
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+ """
+ Tests the Volume Services API.
+ Listing volume services requires admin privileges.
+ """
+ _interface = "json"
+
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesServicesTestJSON, cls).setUpClass()
+ cls.client = cls.os_adm.volume_services_client
+ resp, cls.services = cls.client.list_services()
+ cls.host_name = cls.services[0]['host']
+ cls.binary_name = cls.services[0]['binary']
+
+ @test.attr(type='gate')
+ def test_list_services(self):
+ resp, services = self.client.list_services()
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_binary_name(self):
+ params = {'binary': self.binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+ for service in services:
+ self.assertEqual(self.binary_name, service['binary'])
+
+ @test.attr(type='gate')
+ def test_get_service_by_host_name(self):
+ services_on_host = [service for service in self.services if
+ service['host'] == self.host_name]
+ params = {'host': self.host_name}
+
+ resp, services = self.client.list_services(params)
+
+ # A periodic job could check in between the two service
+ # lookups, so only compare the binary lists.
+ s1 = map(lambda x: x['binary'], services)
+ s2 = map(lambda x: x['binary'], services_on_host)
+ # Sort the lists before comparing, to remove any dependency
+ # on ordering.
+ self.assertEqual(sorted(s1), sorted(s2))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_and_host_name(self):
+ params = {'host': self.host_name, 'binary': self.binary_name}
+
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(1, len(services))
+ self.assertEqual(self.host_name, services[0]['host'])
+ self.assertEqual(self.binary_name, services[0]['binary'])
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
self.volume['id'])
self.assertEqual('error', volume_get['status'])
- @test.attr(type='gate')
- def test_volume_begin_detaching(self):
- # test volume begin detaching : available -> detaching -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('detaching', volume_get['status'])
-
- @test.attr(type='gate')
- def test_volume_roll_detaching(self):
- # test volume roll detaching : detaching -> in-use -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp, body = self.client.volume_roll_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('in-use', volume_get['status'])
-
def test_volume_force_delete_when_volume_is_creating(self):
# test force delete when status of volume is creating
self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2c6050c..ff616fc 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -107,6 +107,7 @@
cls.snapshots_client = cls.os.snapshots_client
cls.volumes_client = cls.os.volumes_client
cls.backups_client = cls.os.backups_client
+ cls.volume_services_client = cls.os.volume_services_client
cls.volumes_extension_client = cls.os.volumes_extension_client
@classmethod
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+_common_schema = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'certificate': {
+ 'type': 'object',
+ 'properties': {
+ 'data': {'type': 'string'},
+ 'private_key': {'type': 'string'},
+ },
+ 'required': ['data', 'private_key'],
+ }
+ },
+ 'required': ['certificate'],
+ }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+ 'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
index fd02780..aa019e4 100644
--- a/tempest/api_schema/compute/flavors.py
+++ b/tempest/api_schema/compute/flavors.py
@@ -36,6 +36,21 @@
}
}
+common_flavor_info = {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'ram': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ 'swap': {'type': 'integer'},
+ 'disk': {'type': 'integer'},
+ 'id': {'type': 'string'}
+ },
+ 'required': ['name', 'links', 'ram', 'vcpus',
+ 'swap', 'disk', 'id']
+}
+
common_flavor_list_details = {
'status_code': [200],
'response_body': {
@@ -43,22 +58,20 @@
'properties': {
'flavors': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'name': {'type': 'string'},
- 'links': parameter_types.links,
- 'ram': {'type': 'integer'},
- 'vcpus': {'type': 'integer'},
- 'swap': {'type': 'integer'},
- 'disk': {'type': 'integer'},
- 'id': {'type': 'string'}
- },
- 'required': ['name', 'links', 'ram', 'vcpus',
- 'swap', 'disk', 'id']
- }
+ 'items': common_flavor_info
}
},
'required': ['flavors']
}
}
+
+common_flavor_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor': common_flavor_info
+ },
+ 'required': ['flavor']
+ }
+}
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
index a73e214..a9d6567 100644
--- a/tempest/api_schema/compute/hosts.py
+++ b/tempest/api_schema/compute/hosts.py
@@ -12,6 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+common_start_up_body = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'power_action': {'enum': ['startup']}
+ },
+ 'required': ['host', 'power_action']
+}
+
list_hosts = {
'status_code': [200],
'response_body': {
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_migrations = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'migrations': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is currently integer, but
+ # 'string' is also allowed here because it may be
+ # changed to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'status': {'type': 'string'},
+ 'instance_uuid': {'type': 'string'},
+ 'source_node': {'type': 'string'},
+ 'source_compute': {'type': 'string'},
+ 'dest_node': {'type': 'string'},
+ 'dest_compute': {'type': 'string'},
+ 'dest_host': {'type': 'string'},
+ 'old_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'new_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'created_at': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']}
+ },
+ 'required': [
+ 'id', 'status', 'instance_uuid', 'source_node',
+ 'source_compute', 'dest_node', 'dest_compute',
+ 'dest_host', 'old_instance_type_id',
+ 'new_instance_type_id', 'created_at', 'updated_at'
+ ]
+ }
+ }
+ },
+ 'required': ['migrations']
+ }
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index 31cd56b..24cdedd 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -55,10 +55,19 @@
'response_body': {
'type': 'object',
'properties': {
- 'metadata': {'type': 'object'}
+ 'metadata': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
},
'required': ['metadata']
}
}
list_server_metadata = copy.deepcopy(set_server_metadata)
+
+delete_server_metadata_item = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..de3e12b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_aggregate = {
+ 'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
index 48e6ceb..bee6ecb 100644
--- a/tempest/api_schema/compute/v2/flavors.py
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -35,3 +35,23 @@
unset_flavor_extra_specs = {
'status_code': [200]
}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes as an integer value, but if it is empty it
+# comes as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+ 'status_code': [202]
+}
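As a quick illustration of how the composed schema above is used (a minimal sketch, not part of the patch; the sample flavor body is invented):

    import jsonschema

    from tempest.api_schema.compute.v2 import flavors as v2schema

    # An invented v2 flavor response body; note 'swap' may legitimately be "".
    body = {'flavor': {'name': 'm1.tiny', 'links': [], 'ram': 512,
                       'vcpus': 1, 'swap': '', 'disk': 1, 'id': '1'}}
    # validate_response() in rest_client runs this check against
    # schema['response_body'] and raises on a mismatch.
    jsonschema.validate(body, v2schema.create_get_flavor_details['response_body'])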
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
index cd6bd7b..9ec8848 100644
--- a/tempest/api_schema/compute/v2/hosts.py
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -14,18 +14,12 @@
import copy
-body = {
- 'type': 'object',
- 'properties': {
- 'host': {'type': 'string'},
- 'power_action': {'enum': ['startup']}
- },
- 'required': ['host', 'power_action']
-}
+from tempest.api_schema.compute import hosts
+
startup_host = {
'status_code': [200],
- 'response_body': body
+ 'response_body': hosts.common_start_up_body
}
# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fad6b56..d121060 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
from tempest.api_schema.compute import parameter_types
+common_image_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'minDisk': {'type': 'integer'},
+ 'minRam': {'type': 'integer'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is currently integer, but
+ # 'string' is also allowed here because it may be
+ # changed to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+ },
+ # The 'server' attribute only comes in the response body if the image
+ # is associated with a server. 'OS-EXT-IMG-SIZE:size' is an API
+ # extension, so neither is defined as 'required'.
+ 'required': ['id', 'status', 'updated', 'links', 'name',
+ 'created', 'minDisk', 'minRam', 'progress',
+ 'metadata']
+}
+
get_image = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'status': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'links': parameter_types.links,
- 'name': {'type': 'string'},
- 'created': {'type': 'string'},
- 'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
- 'minDisk': {'type': 'integer'},
- 'minRam': {'type': 'integer'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'server': {
- 'type': 'object',
- 'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- }
- },
- # 'server' attributes only comes in response body if image is
- # associated with any server. So it is not 'required'
- 'required': ['id', 'status', 'updated', 'links', 'name',
- 'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
- 'minRam', 'progress', 'metadata']
- }
+ 'image': common_image_schema
},
'required': ['image']
}
@@ -67,20 +70,7 @@
'type': 'object',
'properties': {
'id': {'type': 'string'},
- 'links': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'href': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'rel': {'type': 'string'}
- },
- 'required': ['href', 'rel']
- }
- },
+ 'links': parameter_types.links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
@@ -92,7 +82,17 @@
}
create_image = {
- 'status_code': [202]
+ 'status_code': [202],
+ 'response_header': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'format': 'uri'
+ }
+ },
+ 'required': ['location']
+ }
}
delete = {
@@ -120,3 +120,17 @@
'required': ['meta']
}
}
+
+list_images_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'images': {
+ 'type': 'array',
+ 'items': common_image_schema
+ }
+ },
+ 'required': ['images']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 6dd44cd..8b4bead 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
# License for the specific language governing permissions and limitations
# under the License.
+common_security_group_rule = {
+ 'from_port': {'type': ['integer', 'null']},
+ 'to_port': {'type': ['integer', 'null']},
+ 'group': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ }
+ },
+ 'ip_protocol': {'type': ['string', 'null']},
+ # 'parent_group_id' can be a UUID, so it is also defined as 'string'.
+ 'parent_group_id': {'type': ['string', 'integer', 'null']},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ }
+ # When an optional argument such as 'group_id' is provided in the
+ # request body, the 'cidr' attribute does not come back in the
+ # response body, so it is not 'required'.
+ },
+ 'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'rules': {
+ 'type': 'array',
+ 'items': {
+ 'type': ['object', 'null'],
+ 'properties': common_security_group_rule
+ }
+ },
+ 'description': {'type': 'string'},
+ },
+ 'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
list_security_groups = {
'status_code': [200],
'response_body': {
@@ -19,24 +62,24 @@
'properties': {
'security_groups': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': ['integer', 'string']},
- 'name': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'rules': {'type': 'array'},
- 'description': {'type': 'string'},
- },
- 'required': ['id', 'name', 'tenant_id', 'rules',
- 'description'],
- }
+ 'items': common_security_group
}
},
'required': ['security_groups']
}
}
+get_security_group = create_security_group = update_security_group = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group': common_security_group
+ },
+ 'required': ['security_group']
+ }
+}
+
create_security_group_rule = {
'status_code': [200],
'response_body': {
@@ -44,25 +87,7 @@
'properties': {
'security_group_rule': {
'type': 'object',
- 'properties': {
- 'from_port': {'type': 'integer'},
- 'to_port': {'type': 'integer'},
- 'group': {'type': 'object'},
- 'ip_protocol': {'type': 'string'},
- # 'parent_group_id' can be UUID so defining it
- # as 'string' also.
- 'parent_group_id': {'type': ['integer', 'string']},
- 'id': {'type': ['integer', 'string']},
- 'ip_range': {
- 'type': 'object',
- 'properties': {
- 'cidr': {'type': 'string'}
- }
- # When optional argument is provided in request body
- # like 'group_id' then, attribute 'cidr' does not
- # comes in response body. So it is not 'required'.
- }
- },
+ 'properties': common_security_group_rule,
'required': ['from_port', 'to_port', 'group', 'ip_protocol',
'parent_group_id', 'id', 'ip_range']
}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index eed4589..5be51e1 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -90,3 +90,19 @@
detach_volume = {
'status_code': [202]
}
+
+set_get_server_metadata_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['meta']
+ }
+}
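The patternProperties constraint added above restricts every metadata value to a string. A minimal sketch of what it accepts and rejects (sample bodies invented for illustration):

    import jsonschema

    from tempest.api_schema.compute.v2 import servers as v2schema

    schema = v2schema.set_get_server_metadata_item['response_body']
    # Accepted: every metadata value is a string.
    jsonschema.validate({'meta': {'foo': 'bar'}}, schema)
    # Rejected: a non-string value raises jsonschema.ValidationError.
    try:
        jsonschema.validate({'meta': {'foo': 1}}, schema)
    except jsonschema.ValidationError:
        pass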
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..358e455
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_aggregate = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
index 468658c..52010f5 100644
--- a/tempest/api_schema/compute/v3/flavors.py
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -39,3 +39,30 @@
unset_flavor_extra_specs = {
'status_code': [204]
}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the v3 API, 'swap' comes as '0', not as an empty string '""'
+# (as it does in the v2 API), so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'disabled': {'type': 'boolean'},
+ 'ephemeral': {'type': 'integer'},
+ 'flavor-access:is_public': {'type': 'boolean'},
+ 'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+ 'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
index 2cf8f9b..575a6e2 100644
--- a/tempest/api_schema/compute/v3/hosts.py
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -13,14 +13,16 @@
# under the License.
import copy
-from tempest.api_schema.compute.v2 import hosts
+
+from tempest.api_schema.compute import hosts
+
startup_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'host': hosts.body
+ 'host': hosts.common_start_up_body
},
'required': ['host']
}
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index f2a4b78..cace7d2 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
create_server = {
'status_code': [202],
@@ -46,3 +48,5 @@
attach_detach_volume = {
'status_code': [202]
}
+
+set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..ac8cbd1 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
:param client_type: 'tempest' or 'official'
:param interface: 'json' or 'xml'. Applicable for tempest client only
"""
+ credentials = self._convert_credentials(credentials)
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
- self.credentials = credentials
self.client_type = client_type
self.interface = interface
if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
self.alt_auth_data = None
self.alt_part = None
+ def _convert_credentials(self, credentials):
+ # Support dict credentials for backwards compatibility
+ if isinstance(credentials, dict):
+ return get_credentials(**credentials)
+ else:
+ return credentials
+
def __str__(self):
return "Creds :{creds}, client type: {client_type}, interface: " \
"{interface}, cached auth data: {cache}".format(
@@ -76,9 +83,9 @@
@classmethod
def check_credentials(cls, credentials):
"""
- Verify credentials are valid. Subclasses can do a better check.
+ Verify credentials are valid.
"""
- return isinstance(credentials, dict)
+ return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
@@ -218,16 +225,6 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV2AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -240,9 +237,9 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
auth_data=True)
else:
raise NotImplementedError
@@ -303,12 +300,15 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+ def _convert_credentials(self, credentials):
+ # For V3, do not convert, as V3 Credentials are not defined yet
+ return credentials
+
@classmethod
def check_credentials(cls, credentials, scoped=True):
# tenant_name is optional if not scoped
- valid = super(KeystoneV3AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials and 'domain_name' in credentials
+ valid = 'username' in credentials and 'password' in credentials \
+ and 'domain_name' in credentials
if scoped:
valid = valid and 'tenant_name' in credentials
return valid
@@ -327,7 +327,7 @@
return dict(
user=self.credentials['username'],
password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ tenant=self.credentials['tenant_name'],
domain=self.credentials['domain_name'],
auth_data=True)
else:
@@ -398,3 +398,144 @@
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
+
+
+def get_credentials(credential_type=None, **kwargs):
+ """
+ Builds a credentials object based on the configured auth_version
+
+ :param credential_type (string): requests credentials from the tempest
+ configuration file. Valid values are defined in
+ Credentials.TYPES.
+ :param kwargs (dict): taken into account only if credential_type is
+ not specified or None. Dict of credential key/value pairs
+
+ Examples:
+
+ Returns credentials from the provided parameters:
+ >>> get_credentials(username='foo', password='bar')
+
+ Returns credentials from tempest configuration:
+ >>> get_credentials(credential_type='user')
+ """
+ if CONF.identity.auth_version == 'v2':
+ credential_class = KeystoneV2Credentials
+ else:
+ raise exceptions.InvalidConfiguration('Unsupported auth version')
+ if credential_type is not None:
+ creds = credential_class.get_default(credential_type)
+ else:
+ creds = credential_class(**kwargs)
+ return creds
+
+
+class Credentials(object):
+ """
+ Set of credentials for accessing OpenStack services
+
+ ATTRIBUTES: list of valid class attributes representing credentials.
+
+ TYPES: types of credentials available in the configuration file.
+ For each key there's a tuple (section, prefix) to match the
+ configuration options.
+ """
+
+ ATTRIBUTES = []
+ TYPES = {
+ 'identity_admin': ('identity', 'admin'),
+ 'compute_admin': ('compute_admin', None),
+ 'user': ('identity', None),
+ 'alt_user': ('identity', 'alt')
+ }
+
+ def __init__(self, **kwargs):
+ """
+ Enforce the available attributes at init time (only).
+ Additional attributes can still be set afterwards if tests need
+ to do so.
+ """
+ self._apply_credentials(kwargs)
+
+ def _apply_credentials(self, attr):
+ for key in attr.keys():
+ if key in self.ATTRIBUTES:
+ setattr(self, key, attr[key])
+ else:
+ raise exceptions.InvalidCredentials
+
+ def __str__(self):
+ """
+ Represent only attributes included in self.ATTRIBUTES
+ """
+ _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+ return str(_repr)
+
+ def __eq__(self, other):
+ """
+ Credentials are equal if attributes in self.ATTRIBUTES are equal
+ """
+ return str(self) == str(other)
+
+ def __getattr__(self, key):
+ # If an attribute is set, __getattr__ is not invoked
+ # If an attribute is not set, and it is a known one, return None
+ if key in self.ATTRIBUTES:
+ return None
+ else:
+ raise AttributeError
+
+ def __delitem__(self, key):
+ # For backwards compatibility, support dict behaviour
+ if key in self.ATTRIBUTES:
+ delattr(self, key)
+ else:
+ raise AttributeError
+
+ def get(self, item, default):
+ # In this patch, act as a dict for backwards compatibility
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+ @classmethod
+ def get_default(cls, credentials_type):
+ if credentials_type not in cls.TYPES:
+ raise exceptions.InvalidCredentials()
+ creds = cls._get_default(credentials_type)
+ if not creds.is_valid():
+ raise exceptions.InvalidConfiguration()
+ return creds
+
+ @classmethod
+ def _get_default(cls, credentials_type):
+ raise NotImplementedError
+
+ def is_valid(self):
+ raise NotImplementedError
+
+
+class KeystoneV2Credentials(Credentials):
+
+ CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+ ATTRIBUTES = ['user_id', 'tenant_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ @classmethod
+ def _get_default(cls, credentials_type='user'):
+ params = {}
+ section, prefix = cls.TYPES[credentials_type]
+ for attr in cls.CONF_ATTRIBUTES:
+ _section = getattr(CONF, section)
+ if prefix is None:
+ params[attr] = getattr(_section, attr)
+ else:
+ params[attr] = getattr(_section, prefix + "_" + attr)
+ return KeystoneV2Credentials(**params)
+
+ def is_valid(self):
+ """
+ The minimum set of valid credentials is username and password.
+ Tenant is optional.
+ """
+ return None not in (self.username, self.password)
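A minimal sketch of the new credentials flow introduced above (assumes a tempest configuration with identity.auth_version set to 'v2'; the values are invented):

    from tempest import auth

    # Build credentials directly from key/value pairs ...
    creds = auth.get_credentials(username='demo', password='secret')
    assert creds.is_valid()           # username and password are sufficient
    assert creds.tenant_name is None  # known-but-unset attributes return None

    # ... or load a predefined set from the configuration file (assumes the
    # identity admin credentials are present and valid in tempest.conf).
    admin_creds = auth.get_credentials(credential_type='identity_admin')

Auth providers now take a Credentials object; a plain dict is still accepted and converted internally by _convert_credentials().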
diff --git a/tempest/clients.py b/tempest/clients.py
index 0ebbd7c..646a2d9 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -161,6 +161,8 @@
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
+from tempest.services.volume.json.admin.volume_services_client import \
+ VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
@@ -174,6 +176,8 @@
VolumeHostsClientXML
from tempest.services.volume.xml.admin.volume_quotas_client import \
VolumeQuotasClientXML
+from tempest.services.volume.xml.admin.volume_services_client import \
+ VolumesServicesClientXML
from tempest.services.volume.xml.admin.volume_types_client import \
VolumeTypesClientXML
from tempest.services.volume.xml.backups_client import BackupsClientXML
@@ -240,6 +244,8 @@
self.availability_zone_client = AvailabilityZoneClientXML(
self.auth_provider)
self.service_client = ServiceClientXML(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientXML(
+ self.auth_provider)
self.aggregates_client = AggregatesClientXML(self.auth_provider)
self.services_client = ServicesClientXML(self.auth_provider)
self.tenant_usages_client = TenantUsagesClientXML(
@@ -315,6 +321,8 @@
self.services_v3_client = ServicesV3ClientJSON(
self.auth_provider)
self.service_client = ServiceClientJSON(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientJSON(
+ self.auth_provider)
self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
self.aggregates_v3_client = AggregatesV3ClientJSON(
self.auth_provider)
@@ -432,24 +440,6 @@
service=service)
-class OrchestrationManager(Manager):
- """
- Manager object that uses the admin credentials for its
- so that heat templates can create users
- """
- def __init__(self, interface='json', service=None):
- base = super(OrchestrationManager, self)
- # heat currently needs an admin user so that stacks can create users
- # however the tests need the demo tenant so that the neutron
- # private network is the default. DO NOT change this auth combination
- # until heat can run with the demo user.
- base.__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.tenant_name,
- interface=interface,
- service=service)
-
-
class OfficialClientManager(manager.Manager):
"""
Manager that provides access to the official python clients for
diff --git a/tempest/cmd/__init__.py b/tempest/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cmd/__init__.py
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..7b2e60b
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+ default_config_dir = os.path.join(os.path.abspath(
+ os.path.dirname(os.path.dirname(__file__))), "etc")
+ default_config_file = "tempest.conf"
+
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+ conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+ path = os.path.join(conf_dir, conf_file)
+ fd = open(path, 'rw')
+ return fd
+
+
+def change_option(option, group, value):
+ config_parse = configparser.SafeConfigParser()
+ config_parse.optionxform = str
+ config_parse.readfp(CONF_FILE)
+ if not config_parse.has_section(group):
+ config_parse.add_section(group)
+ config_parse.set(group, option, str(value))
+ global OUTFILE
+ config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+ print('Config option %s in group %s should be changed to: %s'
+ % (option, group, value))
+ if update:
+ change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+ # Check glance api versions
+ __, versions = os.image_client.get_versions()
+ if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+ versions):
+ print_and_or_update('api_v1', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v1, update)
+ if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_unversioned_endpoint(base_url):
+ endpoint_parts = urlparse.urlparse(base_url)
+ endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+ return endpoint
+
+
+def _get_api_versions(os, service):
+ client_dict = {
+ 'nova': os.servers_client,
+ 'keystone': os.identity_client,
+ 'cinder': os.volumes_client,
+ }
+ client_dict[service].skip_path()
+ endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
+ __, body = RAW_HTTP.request(endpoint, 'GET')
+ client_dict[service].reset_path()
+ body = json.loads(body)
+ if service == 'keystone':
+ versions = map(lambda x: x['id'], body['versions']['values'])
+ else:
+ versions = map(lambda x: x['id'], body['versions'])
+ return versions
+
+
+def verify_keystone_api_versions(os, update):
+ # Check keystone api versions
+ versions = _get_api_versions(os, 'keystone')
+ if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v2, update)
+ if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+ versions = _get_api_versions(os, 'nova')
+ if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'compute_feature_enabled',
+ not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+ # Check cinder api versions
+ versions = _get_api_versions(os, 'cinder')
+ if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+ print_and_or_update('api_v1', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v1, update)
+ if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+ extensions_client = {
+ 'nova': os.extensions_client,
+ 'nova_v3': os.extensions_v3_client,
+ 'cinder': os.volumes_extension_client,
+ 'neutron': os.network_client,
+ 'swift': os.account_client,
+ }
+ if service not in extensions_client:
+ print('No tempest extensions client for %s' % service)
+ exit(1)
+ return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+ extensions_options = {
+ 'nova': CONF.compute_feature_enabled.api_extensions,
+ 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+ 'cinder': CONF.volume_feature_enabled.api_extensions,
+ 'neutron': CONF.network_feature_enabled.api_extensions,
+ 'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+ }
+ if service not in extensions_options:
+ print('No supported extensions list option for %s' % service)
+ exit(1)
+ return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+ extensions_client = get_extension_client(os, service)
+ __, resp = extensions_client.list_extensions()
+ if isinstance(resp, dict):
+ # Neutron's extension 'name' field is not a single word (it has
+ # spaces in the string). Since that can't be used for a list option,
+ # the api_extensions option in the network-feature-enabled group uses
+ # the alias instead of the name.
+ if service == 'neutron':
+ extensions = map(lambda x: x['alias'], resp['extensions'])
+ elif service == 'swift':
+ # Remove Swift general information from extensions list
+ resp.pop('swift')
+ extensions = resp.keys()
+ else:
+ extensions = map(lambda x: x['name'], resp['extensions'])
+
+ else:
+ extensions = map(lambda x: x['name'], resp)
+ if not results.get(service):
+ results[service] = {}
+ extensions_opt = get_enabled_extensions(service)
+ if extensions_opt[0] == 'all':
+ results[service]['extensions'] = extensions
+ return results
+ # Verify that all configured extensions are actually enabled
+ for extension in extensions_opt:
+ results[service][extension] = extension in extensions
+ # Verify that there aren't additional extensions enabled that aren't
+ # specified in the config list
+ for extension in extensions:
+ if extension not in extensions_opt:
+ results[service][extension] = False
+ return results
+
+
+def display_results(results, update, replace):
+ update_dict = {
+ 'swift': 'object-storage-feature-enabled',
+ 'nova': 'compute-feature-enabled',
+ 'nova_v3': 'compute-feature-enabled',
+ 'cinder': 'volume-feature-enabled',
+ 'neutron': 'network-feature-enabled',
+ }
+ for service in results:
+ # If all extensions are specified as being enabled, there is no way
+ # to verify this, so we just assume it to be true.
+ if results[service].get('extensions'):
+ if replace:
+ output_list = results[service].get('extensions')
+ else:
+ output_list = ['all']
+ else:
+ extension_list = get_enabled_extensions(service)
+ output_list = []
+ for extension in results[service]:
+ if not results[service][extension]:
+ if extension in extension_list:
+ print("%s extension: %s should not be included in the "
+ "list of enabled extensions" % (service,
+ extension))
+ else:
+ print("%s extension: %s should be included in the list"
+ " of enabled extensions" % (service, extension))
+ output_list.append(extension)
+ else:
+ output_list.append(extension)
+ if update:
+ # Sort List
+ output_list.sort()
+ # Convert list to a string
+ output_string = ', '.join(output_list)
+ if service == 'swift':
+ change_option('discoverable_apis', update_dict[service],
+ output_string)
+ elif service == 'nova_v3':
+ change_option('api_v3_extensions', update_dict[service],
+ output_string)
+ else:
+ change_option('api_extensions', update_dict[service],
+ output_string)
+
+
+def check_service_availability(os, update):
+ services = []
+ avail_services = []
+ codename_match = {
+ 'volume': 'cinder',
+ 'network': 'neutron',
+ 'image': 'glance',
+ 'object_storage': 'swift',
+ 'compute': 'nova',
+ 'orchestration': 'heat',
+ 'metering': 'ceilometer',
+ 'telemetry': 'ceilometer',
+ 'data_processing': 'sahara',
+ 'baremetal': 'ironic',
+ 'identity': 'keystone',
+ 'queuing': 'marconi',
+ 'database': 'trove'
+ }
+ # Get catalog list for endpoints to use for validation
+ __, endpoints = os.endpoints_client.list_endpoints()
+ for endpoint in endpoints:
+ __, service = os.service_client.get_service(endpoint['service_id'])
+ services.append(service['type'])
+ # Pull all catalog types from config file and compare against endpoint list
+ for cfgname in dir(CONF._config):
+ cfg = getattr(CONF, cfgname)
+ catalog_type = getattr(cfg, 'catalog_type', None)
+ if not catalog_type:
+ continue
+ else:
+ if cfgname == 'identity':
+ # Keystone is a required service for tempest
+ continue
+ if catalog_type not in services:
+ if getattr(CONF.service_available, codename_match[cfgname]):
+ print('Endpoint type %s not found; either disable service '
+ '%s or fix the catalog_type in the config file' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', False)
+ else:
+ if not getattr(CONF.service_available,
+ codename_match[cfgname]):
+ print('Endpoint type %s is available, service %s should be'
+ ' set as available in the config file.' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', True)
+ else:
+ avail_services.append(codename_match[cfgname])
+ return avail_services
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-u', '--update', action='store_true',
+ help='Update the config file with results from api '
+ 'queries. This assumes whatever is set in the '
+ 'config file is incorrect. In the case of '
+ 'endpoint checks where it could either be the '
+ 'incorrect catalog type or the service available '
+ 'option, the service available option is assumed '
+ 'to be incorrect and is thus changed')
+ parser.add_argument('-o', '--output',
+ help="Output file to write an updated config file to. "
+ "This has to be a separate file from the "
+ "original config file. If one isn't specified "
+ "with -u the new config file will be printed to "
+ "STDOUT")
+ parser.add_argument('-r', '--replace-ext', action='store_true',
+ help="If specified the all option will be replaced "
+ "with a full list of extensions")
+ args = parser.parse_args()
+ return args
+
+
+def main():
+ print('Running config verification...')
+ opts = parse_args()
+ update = opts.update
+ replace = opts.replace_ext
+ global CONF_FILE
+ global OUTFILE
+ if update:
+ CONF_FILE = _get_config_file()
+ if opts.output:
+ OUTFILE = open(opts.output, 'w+')
+ os = clients.ComputeAdminManager(interface='json')
+ services = check_service_availability(os, update)
+ results = {}
+ for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+ if service == 'nova_v3' and 'nova' not in services:
+ continue
+ elif service not in services:
+ continue
+ results = verify_extensions(os, service, results)
+ verify_keystone_api_versions(os, update)
+ verify_glance_api_versions(os, update)
+ verify_nova_api_versions(os, update)
+ verify_cinder_api_versions(os, update)
+ display_results(results, update, replace)
+ if CONF_FILE:
+ CONF_FILE.close()
+ OUTFILE.close()
+
+
+if __name__ == "__main__":
+ main()
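For reference, a hypothetical direct invocation of the new verification tool; it is normally run from the command line against a live deployment configured in tempest.conf, and the flag names below match parse_args() above (the output path is only an example):

    import sys

    from tempest.cmd import verify_tempest_config

    # Query the live APIs, report config mismatches, update the config (-u),
    # expand 'all' extension lists (-r), and write the result to -o.
    sys.argv = ['verify_tempest_config', '-u', '-r',
                '-o', '/tmp/tempest.conf.new']
    verify_tempest_config.main()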
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 8c07d4f..8667445 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -282,7 +282,7 @@
return ""
def _log_request(self, method, req_url, resp,
- secs="", req_headers=None,
+ secs="", req_headers={},
req_body=None, resp_body=None):
# if we have the request id, put it in the right part of the log
extra = dict(request_id=self._get_request_id(resp))
@@ -306,6 +306,8 @@
# world this is important to match
trace_regex = CONF.debug.trace_requests
if trace_regex and re.search(trace_regex, caller_name):
+ if 'X-Auth-Token' in req_headers:
+ req_headers['X-Auth-Token'] = '<omitted>'
log_fmt = """Request (%s): %s %s %s%s
Request - Headers: %s
Body: %s
@@ -369,7 +371,7 @@
# Parse one-item-like xmls (user, role, etc)
return common.xml_to_json(element)
- def response_checker(self, method, url, headers, body, resp, resp_body):
+ def response_checker(self, method, resp, resp_body):
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
@@ -413,8 +415,7 @@
resp_body=resp_body)
# Verify HTTP response codes
- self.response_checker(method, url, req_headers, req_body, resp,
- resp_body)
+ self.response_checker(method, resp, resp_body)
return resp, resp_body
@@ -593,10 +594,12 @@
msg = ("The status code(%s) is different than the expected "
"one(%s)") % (resp.status, response_code)
raise exceptions.InvalidHttpSuccessCode(msg)
- response_schema = schema.get('response_body')
- if response_schema:
+
+ # Check the body of a response
+ body_schema = schema.get('response_body')
+ if body_schema:
try:
- jsonschema.validate(body, response_schema)
+ jsonschema.validate(body, body_schema)
except jsonschema.ValidationError as ex:
msg = ("HTTP response body is invalid (%s)") % ex
raise exceptions.InvalidHTTPResponseBody(msg)
@@ -605,6 +608,15 @@
msg = ("HTTP response body should not exist (%s)") % body
raise exceptions.InvalidHTTPResponseBody(msg)
+ # Check the header of a response
+ header_schema = schema.get('response_header')
+ if header_schema:
+ try:
+ jsonschema.validate(resp, header_schema)
+ except jsonschema.ValidationError as ex:
+ msg = ("HTTP response header is invalid (%s)") % ex
+ raise exceptions.InvalidHTTPResponseHeader(msg)
+
class NegativeRestClient(RestClient):
"""
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 857e1e8..2e2c5ec 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -197,6 +197,10 @@
message = "HTTP response body is invalid json or xml"
+class InvalidHTTPResponseHeader(RestClientException):
+ message = "HTTP response header is invalid"
+
+
class InvalidContentType(RestClientException):
message = "Invalid content type provided"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index f297f22..183d422 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -15,6 +15,8 @@
import os
import re
+import pep8
+
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
@@ -56,6 +58,10 @@
def no_setupclass_for_unit_tests(physical_line, filename):
+
+ if pep8.noqa(physical_line):
+ return
+
if 'tempest/tests' in filename:
if SETUPCLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
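A small sketch of the behaviour the pep8 import enables above: lines tagged with '# noqa' are now skipped by the setUpClass check (the sample line is invented):

    import pep8

    line = "    def setUpClass(cls):  # noqa"
    # pep8.noqa() is truthy for lines carrying a '# noqa' tag, so
    # no_setupclass_for_unit_tests() returns early and emits no warning.
    assert pep8.noqa(line)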
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index db1badd..d771aed 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -302,6 +302,7 @@
"http://{0}/".format(vip_ip)).read())
return resp
+ @test.skip_because(bug='1295165')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_load_balancer_basic(self):
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b1b06cc..c76a117 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -335,8 +335,6 @@
if should_succeed:
msg = "Timed out waiting for %s to become reachable" % ip
else:
- # todo(yfried): remove this line when bug 1252620 is fixed
- return True
msg = "%s is reachable" % ip
try:
self.assertTrue(self._check_remote_connectivity(access_point, ip,
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 54d1252..5c0b5d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -66,7 +67,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v2_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v2schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 65d2657..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -56,6 +56,7 @@
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -79,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v2schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index bd39a04..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -70,6 +70,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_images_details, resp, body)
return resp, body['images']
def get_image(self, image_id):
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
index a13349e..beef5d2 100644
--- a/tempest/services/compute/json/migrations_client.py
+++ b/tempest/services/compute/json/migrations_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import migrations as schema
from tempest.common import rest_client
from tempest import config
@@ -36,4 +37,5 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
return resp, body['migrations']
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 7411fb7..c19baf3 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
post_body = json.dumps({'security_group': post_body})
resp, body = self.post('os-security-groups', post_body)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def update_security_group(self, security_group_id, name=None,
@@ -81,6 +83,7 @@
resp, body = self.put('os-security-groups/%s' % str(security_group_id),
post_body)
body = json.loads(body)
+ self.validate_response(schema.update_security_group, resp, body)
return resp, body['security_group']
def delete_security_group(self, security_group_id):
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 82ded4c..77cbf42 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -300,6 +300,8 @@
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def set_server_metadata_item(self, server_id, key, meta):
@@ -307,11 +309,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 0fc6af9..2487ee7 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -66,7 +67,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v3_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v3schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 602fee2..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -56,6 +56,7 @@
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v3schema.get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -79,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v3schema.create_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v3schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
index efd39b7..c821567 100644
--- a/tempest/services/compute/v3/json/migration_client.py
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -12,8 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
import urllib
+from tempest.api_schema.compute import migrations as schema
from tempest.common import rest_client
from tempest import config
@@ -34,4 +36,6 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
+ return resp, body['migrations']
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 67232a8..49ee2ac 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -308,6 +308,8 @@
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def set_server_metadata_item(self, server_id, key, meta):
@@ -315,11 +317,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
new file mode 100644
index 0000000..d43c04a
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientJSON, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['services']
diff --git a/tempest/services/volume/xml/admin/volume_services_client.py b/tempest/services/volume/xml/admin/volume_services_client.py
new file mode 100644
index 0000000..7bad16d
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_services_client.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientXML(rest_client.RestClient):
+ TYPE = "xml"
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientXML, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ node = etree.fromstring(body)
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
+ return resp, body
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+ def _create_keypair(self):
+ keyname = data_utils.rand_name("key")
+ resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+ assert(resp.status == 200)
+
+ def _delete_keypair(self):
+ resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+ assert(resp.status == 202)
+
+ def _create_vm(self):
+ self.name = name = data_utils.rand_name("instance")
+ servers_client = self.manager.servers_client
+ self.logger.info("creating %s" % name)
+ vm_args = self.vm_extra_args.copy()
+ vm_args['security_groups'] = [self.sec_grp]
+ vm_args['key_name'] = self.key['name']
+ resp, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
+ self.server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
+
+ def _destroy_vm(self):
+ self.logger.info("deleting server: %s" % self.server_id)
+ resp, _ = self.manager.servers_client.delete_server(self.server_id)
+ assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.wait_for_server_termination(self.server_id)
+ self.logger.info("deleted server: %s" % self.server_id)
+
+ def _create_sec_group(self):
+ sec_grp_cli = self.manager.security_groups_client
+ s_name = data_utils.rand_name('sec_grp-')
+ s_description = data_utils.rand_name('desc-')
+ _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
+ create_rule = sec_grp_cli.create_security_group_rule
+ create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+ create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+ def _destroy_sec_grp(self):
+ sec_grp_cli = self.manager.security_groups_client
+ sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+ def _create_floating_ip(self):
+ floating_cli = self.manager.floating_ips_client
+ _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+ def _destroy_floating_ip(self):
+ cli = self.manager.floating_ips_client
+ cli.delete_floating_ip(self.floating['id'])
+ cli.wait_for_resource_deletion(self.floating['id'])
+ self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+ def _create_volume(self):
+ name = data_utils.rand_name("volume")
+ self.logger.info("creating volume: %s" % name)
+ volumes_client = self.manager.volumes_client
+ resp, self.volume = volumes_client.create_volume(size=1,
+ display_name=
+ name)
+ assert(resp.status == 200)
+ volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ self.logger.info("created volume: %s" % self.volume['id'])
+
+ def _delete_volume(self):
+ self.logger.info("deleting volume: %s" % self.volume['id'])
+ volumes_client = self.manager.volumes_client
+ resp, _ = volumes_client.delete_volume(self.volume['id'])
+ assert(resp.status == 202)
+ volumes_client.wait_for_resource_deletion(self.volume['id'])
+ self.logger.info("deleted volume: %s" % self.volume['id'])
+
+ def _wait_disassociate(self):
+ cli = self.manager.floating_ips_client
+
+ def func():
+ _, floating = cli.get_floating_ip_details(self.floating['id'])
+ return floating['instance_id'] is None
+
+ if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise RuntimeError("IP disassociate timeout!")
+
+ def new_server_ops(self):
+ self._create_vm()
+ cli = self.manager.floating_ips_client
+ cli.associate_floating_ip_to_server(self.floating['ip'],
+ self.server_id)
+ if self.ssh_test_before_attach and self.enable_ssh_verify:
+ self.logger.info("Scanning for block devices via ssh on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+
+ def setUp(self, **kwargs):
+ """Note able configuration combinations:
+ Closest options to the test_stamp_pattern:
+ new_server = True
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = False
+ Just attaching:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ Mostly API load by repeated attachment:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = False
+ ssh_test_before_attach = False
+ Minimal Nova load, but cinder load not decreased:
+ new_server = False
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ """
+ self.image = CONF.compute.image_ref
+ self.flavor = CONF.compute.flavor_ref
+ self.vm_extra_args = kwargs.get('vm_extra_args', {})
+ self.floating_pool = kwargs.get('floating_pool', None)
+ self.new_volume = kwargs.get('new_volume', True)
+ self.new_server = kwargs.get('new_server', False)
+ self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+ self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+ False)
+ self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+ self.detach_match_count = kwargs.get('detach_match_count', 1)
+ self.attach_match_count = kwargs.get('attach_match_count', 2)
+ self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+ self._create_floating_ip()
+ self._create_sec_group()
+ self._create_keypair()
+ private_key = self.key['private_key']
+ username = CONF.compute.image_ssh_user
+ self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+ username,
+ pkey=private_key)
+ if not self.new_volume:
+ self._create_volume()
+ if not self.new_server:
+ self.new_server_ops()
+
+ # Now we just test whether the number of matching partitions increased or decreased.
+ def part_wait(self, num_match):
+ def _part_state():
+ self.partitions = self.remote_client.get_partitions().split('\n')
+ matching = 0
+ for part_line in self.partitions[1:]:
+ if self.part_line_re.match(part_line):
+ matching += 1
+ return matching == num_match
+ if tempest.test.call_until_true(_part_state,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ return
+ else:
+ raise RuntimeError("Unexpected partitions: %s",
+ str(self.partitions))
+
+ def run(self):
+ if self.new_server:
+ self.new_server_ops()
+ if self.new_volume:
+ self._create_volume()
+ servers_client = self.manager.servers_client
+ self.logger.info("attach volume (%s) to vm %s" %
+ (self.volume['id'], self.server_id))
+ resp, body = servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'in-use')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for new block device on %s"
+ % self.server_id)
+ self.part_wait(self.attach_match_count)
+
+ resp, body = servers_client.detach_volume(self.server_id,
+ self.volume['id'])
+ assert(resp.status == 202)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for block device disapperance on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+ if self.new_volume:
+ self._delete_volume()
+ if self.new_server:
+ self._destroy_vm()
+
+ def tearDown(self):
+ cli = self.manager.floating_ips_client
+ cli.disassociate_floating_ip_from_server(self.floating['ip'],
+ self.server_id)
+ self._wait_disassociate()
+ if not self.new_server:
+ self._destroy_vm()
+ self._delete_keypair()
+ self._destroy_floating_ip()
+ self._destroy_sec_grp()
+ if not self.new_volume:
+ self._delete_volume()
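Editor's note: the setUp docstring above enumerates the notable kwargs combinations for this action; the stress framework feeds them in through the "kwargs" field of the JSON job description (see tempest/stress/etc/volume-attach-verify.json below). As a sketch, the "Just attaching" combination from that docstring would correspond to kwargs along these lines (values taken from the docstring, everything else left at its defaults; the variable name is illustrative):

    # Illustrative only: the 'Just attaching' combination described in the
    # setUp docstring, expressed as the kwargs dict the action would receive.
    just_attaching_kwargs = {
        'new_server': False,
        'new_volume': False,
        'enable_ssh_verify': True,
        'ssh_test_before_attach': True,
    }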
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..517cfd5 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -80,17 +80,23 @@
return ret
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
- terminate_all_processes()
+ for process in processes:
+ if (not process['process'].is_alive() and
+ process['process'].exitcode != 0):
+ signal.signal(signalnum, signal.SIG_DFL)
+ terminate_all_processes()
+ break
def terminate_all_processes(check_interval=20):
"""
Goes through the process list and terminates all child processes.
"""
+ LOG.info("Stopping all processes.")
for process in processes:
if process['process'].is_alive():
try:
@@ -174,34 +180,39 @@
signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
- while True:
- if max_runs is None:
- remaining = end_time - time.time()
- if remaining <= 0:
- break
- else:
- remaining = log_check_interval
- all_proc_term = True
- for process in processes:
- if process['process'].is_alive():
- all_proc_term = False
+ try:
+ while True:
+ if max_runs is None:
+ remaining = end_time - time.time()
+ if remaining <= 0:
break
- if all_proc_term:
- break
-
- time.sleep(min(remaining, log_check_interval))
- if stop_on_error:
- for process in processes:
- if process['statistic']['fails'] > 0:
+ else:
+ remaining = log_check_interval
+ all_proc_term = True
+ for process in processes:
+ if process['process'].is_alive():
+ all_proc_term = False
+ break
+ if all_proc_term:
break
- if not logfiles:
- continue
- if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
- stop_on_error):
- had_errors = True
- break
+ time.sleep(min(remaining, log_check_interval))
+ if stop_on_error:
+ if any([True for proc in processes
+ if proc['statistic']['fails'] > 0]):
+ break
+ if not logfiles:
+ continue
+ if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
+ stop_on_error):
+ had_errors = True
+ break
+ except KeyboardInterrupt:
+ LOG.warning("Interrupted, going to print statistics and exit ...")
+
+ if stop_on_error:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+ "threads": 1,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"vm_extra_args": {},
+ "new_volume": true,
+ "new_server": false,
+ "ssh_test_before_attach": false,
+ "enable_ssh_verify": true}
+}
+]
diff --git a/tempest/tests/cmd/__init__.py b/tempest/tests/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/cmd/__init__.py
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
new file mode 100644
index 0000000..40caf30
--- /dev/null
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -0,0 +1,397 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import mock
+
+from tempest.cmd import verify_tempest_config
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestGetAPIVersions(base.TestCase):
+
+ def test_url_grab_versioned_nova_nossl(self):
+ base_url = 'http://127.0.0.1:8774/v2/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('http://127.0.0.1:8774', endpoint)
+
+ def test_url_grab_versioned_nova_ssl(self):
+ base_url = 'https://127.0.0.1:8774/v3/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('https://127.0.0.1:8774', endpoint)
+
+
+class TestDiscovery(base.TestCase):
+
+ def setUp(self):
+ super(TestDiscovery, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_get_keystone_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_get_cinder_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
+ self.assertIn('v1.0', versions)
+ self.assertIn('v2.0', versions)
+
+ def test_get_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_verify_keystone_api_versions_no_v3(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_keystone_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v1(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_nova_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3', 'compute_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_1(self):
+ def fake_get_versions():
+ return (None, ['v1.1'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_0(self):
+ def fake_get_versions():
+ return (None, ['v1.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v1(self):
+ def fake_get_versions():
+ return (None, ['v2.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_extensions_neutron(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('fake1', results['neutron'])
+ self.assertTrue(results['neutron']['fake1'])
+ self.assertIn('fake2', results['neutron'])
+ self.assertTrue(results['neutron']['fake2'])
+ self.assertIn('fake3', results['neutron'])
+ self.assertFalse(results['neutron']['fake3'])
+ self.assertIn('not_fake', results['neutron'])
+ self.assertFalse(results['neutron']['not_fake'])
+
+ def test_verify_extensions_neutron_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('extensions', results['neutron'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['neutron']['extensions'])
+
+ def test_verify_extensions_cinder(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('fake1', results['cinder'])
+ self.assertTrue(results['cinder']['fake1'])
+ self.assertIn('fake2', results['cinder'])
+ self.assertTrue(results['cinder']['fake2'])
+ self.assertIn('fake3', results['cinder'])
+ self.assertFalse(results['cinder']['fake3'])
+ self.assertIn('not_fake', results['cinder'])
+ self.assertFalse(results['cinder']['not_fake'])
+
+ def test_verify_extensions_cinder_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('extensions', results['cinder'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['cinder']['extensions'])
+
+ def test_verify_extensions_nova(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('fake1', results['nova'])
+ self.assertTrue(results['nova']['fake1'])
+ self.assertIn('fake2', results['nova'])
+ self.assertTrue(results['nova']['fake2'])
+ self.assertIn('fake3', results['nova'])
+ self.assertFalse(results['nova']['fake3'])
+ self.assertIn('not_fake', results['nova'])
+ self.assertFalse(results['nova']['not_fake'])
+
+ def test_verify_extensions_nova_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('extensions', results['nova'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova']['extensions'])
+
+ def test_verify_extensions_nova_v3(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('fake1', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake1'])
+ self.assertIn('fake2', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake2'])
+ self.assertIn('fake3', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['fake3'])
+ self.assertIn('not_fake', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['not_fake'])
+
+ def test_verify_extensions_nova_v3_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('extensions', results['nova_v3'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova_v3']['extensions'])
+
+ def test_verify_extensions_swift(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('fake1', results['swift'])
+ self.assertTrue(results['swift']['fake1'])
+ self.assertIn('fake2', results['swift'])
+ self.assertTrue(results['swift']['fake2'])
+ self.assertIn('fake3', results['swift'])
+ self.assertFalse(results['swift']['fake3'])
+ self.assertIn('not_fake', results['swift'])
+ self.assertFalse(results['swift']['not_fake'])
+
+ def test_verify_extensions_swift_all(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('extensions', results['swift'])
+ self.assertEqual(['not_fake', 'fake1', 'fake2'],
+ results['swift']['extensions'])
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..ddffb4a 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.tests import fake_credentials
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ return fake_credentials.FakeCredentials()
+
class FakeAuthProvider(object):
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
self.conf.set_default('lock_path',
str(os.environ.get('OS_TEST_LOCK_PATH')))
+ self.conf.set_default('auth_version', 'v2', group='identity')
+ for config_option in ['username', 'password', 'tenant_name']:
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ self.conf.set_default(prefix + config_option,
+ 'fake_' + config_option,
+ group='identity')
+ # Compute Admin group items
+ self.conf.set_default(config_option, 'fake_' + config_option,
+ group='compute-admin')
class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..a372973
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+ def is_valid(self):
+ return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ tenant_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV2Credentials, self).__init__(**creds)
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..7b5b4d6 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -22,18 +22,16 @@
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
+from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
+from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
class BaseAuthTestsSetUp(base.TestCase):
_auth_provider_class = None
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant'
- }
+ credentials = fake_credentials.FakeCredentials()
def _auth(self, credentials, **params):
"""
@@ -47,6 +45,8 @@
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.stubs.Set(auth, 'get_credentials',
+ fake_auth_provider.get_credentials)
self.auth_provider = self._auth(self.credentials)
@@ -58,12 +58,19 @@
"""
_auth_provider_class = auth.AuthProvider
- def test_check_credentials_is_dict(self):
- self.assertTrue(self.auth_provider.check_credentials({}))
+ def test_check_credentials_class(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.check_credentials,
+ auth.Credentials())
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
+ def test_instantiate_with_dict(self):
+ # Dict credentials are only supported for backward compatibility
+ auth_provider = self._auth(credentials={})
+ self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
def test_instantiate_with_bad_credentials_type(self):
"""
Assure that credentials with bad type fail with TypeError
@@ -104,6 +111,7 @@
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
_endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
_auth_provider_class = auth.KeystoneV2AuthProvider
+ credentials = fake_credentials.FakeKeystoneV2Credentials()
def setUp(self):
super(TestKeystoneV2AuthProvider, self).setUp()
@@ -210,17 +218,6 @@
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred))
- def test_check_credentials_not_scoped_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertTrue(self.auth_provider.check_credentials(cred,
- scoped=False))
-
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
-
def _test_base_url_helper(self, expected_url, filters,
auth_data=None):
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..86600fa
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,112 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+ attributes = {}
+ credentials_class = auth.Credentials
+
+ def _get_credentials(self, attributes=None):
+ if attributes is None:
+ attributes = self.attributes
+ return self.credentials_class(**attributes)
+
+ def setUp(self):
+ super(CredentialsTests, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_create_invalid_attr(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ self._get_credentials,
+ attributes=dict(invalid='fake'))
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ self.assertRaises(NotImplementedError,
+ self.credentials_class.get_default,
+ credentials_type=ctype)
+
+ def test_invalid_default(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ auth.Credentials.get_default,
+ credentials_type='invalid_type')
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'tenant_name': 'fake_tenant_name'
+ }
+
+ identity_response = fake_identity._fake_v2_response
+ credentials_class = auth.KeystoneV2Credentials
+
+ def setUp(self):
+ super(KeystoneV2CredentialsTests, self).setUp()
+ self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def _verify_credentials(self, credentials_class, creds_dict):
+ creds = auth.get_credentials(**creds_dict)
+ # Check the right version of credentials has been returned
+ self.assertIsInstance(creds, credentials_class)
+ # Check the id attributes are filled in
+ attributes = [x for x in creds.ATTRIBUTES if (
+ '_id' in x and x != 'domain_id')]
+ for attr in attributes:
+ self.assertIsNone(getattr(creds, attr))
+
+ def test_get_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(self.credentials_class, self.attributes)
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertTrue(creds.is_valid())
+
+ def test_is_not_valid(self):
+ creds = self._get_credentials()
+ for attr in self.attributes.keys():
+ delattr(creds, attr)
+ self.assertFalse(creds.is_valid(),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ # Default configuration values related to credentials
+ # are defined as fake_* in fake_config.py
+ self.assertEqual(getattr(creds, attr), 'fake_' + attr)
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
new file mode 100644
index 0000000..f584cb9
--- /dev/null
+++ b/tempest/tests/test_hacking.py
@@ -0,0 +1,26 @@
+# Copyright 2014 Matthew Treinish
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.hacking import checks
+from tempest.tests import base
+
+
+class HackingTestCase(base.TestCase):
+ def test_no_setupclass_for_unit_tests(self):
+ self.assertTrue(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/tests/fake_test.py'))
+ self.assertIsNone(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
+ self.assertFalse(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/api/fake_test.py'))
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index 1726beb..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import sys
-import urlparse
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
- # Check glance api versions
- __, versions = os.image_client.get_versions()
- if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
- versions):
- print('Config option image api_v1 should be change to: %s' % (
- not CONF.image_feature_enabled.api_v1))
- if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option image api_v2 should be change to: %s' % (
- not CONF.image_feature_enabled.api_v2))
-
-
-def _get_api_versions(os, service):
- client_dict = {
- 'nova': os.servers_client,
- 'keystone': os.identity_client,
- 'cinder': os.volumes_client,
- }
- client_dict[service].skip_path()
- endpoint_parts = urlparse.urlparse(client_dict[service].base_url)
- endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
- __, body = RAW_HTTP.request(endpoint, 'GET')
- client_dict[service].reset_path()
- body = json.loads(body)
- if service == 'keystone':
- versions = map(lambda x: x['id'], body['versions']['values'])
- else:
- versions = map(lambda x: x['id'], body['versions'])
- return versions
-
-
-def verify_keystone_api_versions(os):
- # Check keystone api versions
- versions = _get_api_versions(os, 'keystone')
- if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option identity api_v2 should be change to %s' % (
- not CONF.identity_feature_enabled.api_v2))
- if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
- print('Config option identity api_v3 should be change to %s' % (
- not CONF.identity_feature_enabled.api_v3))
-
-
-def verify_nova_api_versions(os):
- versions = _get_api_versions(os, 'nova')
- if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
- print('Config option compute api_v3 should be change to: %s' % (
- not CONF.compute_feature_enabled.api_v3))
-
-
-def verify_cinder_api_versions(os):
- # Check cinder api versions
- versions = _get_api_versions(os, 'cinder')
- if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
- print('Config option volume api_v2 should be change to: %s' % (
- not CONF.volume_feature_enabled.api_v1))
- if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option volume api_v2 should be change to: %s' % (
- not CONF.volume_feature_enabled.api_v2))
-
-
-def get_extension_client(os, service):
- extensions_client = {
- 'nova': os.extensions_client,
- 'nova_v3': os.extensions_v3_client,
- 'cinder': os.volumes_extension_client,
- 'neutron': os.network_client,
- 'swift': os.account_client,
- }
- if service not in extensions_client:
- print('No tempest extensions client for %s' % service)
- exit(1)
- return extensions_client[service]
-
-
-def get_enabled_extensions(service):
- extensions_options = {
- 'nova': CONF.compute_feature_enabled.api_extensions,
- 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
- 'cinder': CONF.volume_feature_enabled.api_extensions,
- 'neutron': CONF.network_feature_enabled.api_extensions,
- 'swift': CONF.object_storage_feature_enabled.discoverable_apis,
- }
- if service not in extensions_options:
- print('No supported extensions list option for %s' % service)
- exit(1)
- return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
- extensions_client = get_extension_client(os, service)
- __, resp = extensions_client.list_extensions()
- if isinstance(resp, dict):
- # Neutron's extension 'name' field has is not a single word (it has
- # spaces in the string) Since that can't be used for list option the
- # api_extension option in the network-feature-enabled group uses alias
- # instead of name.
- if service == 'neutron':
- extensions = map(lambda x: x['alias'], resp['extensions'])
- elif service == 'swift':
- # Remove Swift general information from extensions list
- resp.pop('swift')
- extensions = resp.keys()
- else:
- extensions = map(lambda x: x['name'], resp['extensions'])
-
- else:
- extensions = map(lambda x: x['name'], resp)
- if not results.get(service):
- results[service] = {}
- extensions_opt = get_enabled_extensions(service)
- if extensions_opt[0] == 'all':
- results[service]['extensions'] = 'all'
- return results
- # Verify that all configured extensions are actually enabled
- for extension in extensions_opt:
- results[service][extension] = extension in extensions
- # Verify that there aren't additional extensions enabled that aren't
- # specified in the config list
- for extension in extensions:
- if extension not in extensions_opt:
- results[service][extension] = False
- return results
-
-
-def display_results(results):
- for service in results:
- # If all extensions are specified as being enabled there is no way to
- # verify this so we just assume this to be true
- if results[service].get('extensions'):
- continue
- extension_list = get_enabled_extensions(service)
- for extension in results[service]:
- if not results[service][extension]:
- if extension in extension_list:
- print("%s extension: %s should not be included in the list"
- " of enabled extensions" % (service, extension))
- else:
- print("%s extension: %s should be included in the list of "
- "enabled extensions" % (service, extension))
-
-
-def check_service_availability(os):
- services = []
- avail_services = []
- codename_match = {
- 'volume': 'cinder',
- 'network': 'neutron',
- 'image': 'glance',
- 'object_storage': 'swift',
- 'compute': 'nova',
- 'orchestration': 'heat',
- 'metering': 'ceilometer',
- 'telemetry': 'ceilometer',
- 'data_processing': 'sahara',
- 'baremetal': 'ironic',
- 'identity': 'keystone',
- 'queuing': 'marconi',
- 'database': 'trove'
- }
- # Get catalog list for endpoints to use for validation
- __, endpoints = os.endpoints_client.list_endpoints()
- for endpoint in endpoints:
- __, service = os.service_client.get_service(endpoint['service_id'])
- services.append(service['type'])
- # Pull all catalog types from config file and compare against endpoint list
- for cfgname in dir(CONF._config):
- cfg = getattr(CONF, cfgname)
- catalog_type = getattr(cfg, 'catalog_type', None)
- if not catalog_type:
- continue
- else:
- if cfgname == 'identity':
- # Keystone is a required service for tempest
- continue
- if catalog_type not in services:
- if getattr(CONF.service_available, codename_match[cfgname]):
- print('Endpoint type %s not found either disable service '
- '%s or fix the catalog_type in the config file' % (
- catalog_type, codename_match[cfgname]))
- else:
- if not getattr(CONF.service_available,
- codename_match[cfgname]):
- print('Endpoint type %s is available, service %s should be'
- ' set as available in the config file.' % (
- catalog_type, codename_match[cfgname]))
- else:
- avail_services.append(codename_match[cfgname])
- return avail_services
-
-
-def main(argv):
- print('Running config verification...')
- os = clients.ComputeAdminManager(interface='json')
- services = check_service_availability(os)
- results = {}
- for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
- if service == 'nova_v3' and 'nova' not in services:
- continue
- elif service not in services:
- continue
- results = verify_extensions(os, service, results)
- verify_keystone_api_versions(os)
- verify_glance_api_versions(os)
- verify_nova_api_versions(os)
- verify_cinder_api_versions(os)
- display_results(results)
-
-
-if __name__ == "__main__":
- main(sys.argv)