Merge "Verify create/get flavor attributes of Nova APIs"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 70c791b..4a567e7 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -250,6 +250,19 @@
# Should the tests ssh to instances? (boolean value)
#run_ssh=false
+# Auth method used to authenticate to the instance. Valid
+# choices are: keypair, configured, adminpass, disabled.
+# keypair: start the servers with an ssh keypair. configured:
+# use the configured user and password. adminpass: use the
+# injected adminPass. disabled: avoid using ssh when it is an
+# option. (string value)
+#ssh_auth_method=keypair
+
+# How to connect to the instance? fixed: use the first ip
+# belonging to the fixed network. floating: create and use a
+# floating ip (string value)
+#ssh_connect_method=fixed
+
# User name used to authenticate to an instance. (string
# value)
#ssh_user=root
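For reference, a local tempest.conf that exercises ssh validation over a floating ip with the new options could look roughly like the snippet below; the section name follows the sample above, but the concrete values (including the cirros user) are illustrative assumptions, not part of this change:

    [compute]
    run_ssh = true
    ssh_auth_method = keypair
    ssh_connect_method = floating
    ssh_user = cirros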
diff --git a/requirements.txt b/requirements.txt
index e97eece..fe3e5e5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,7 +20,7 @@
keyring>=2.1
testrepository>=0.0.18
oslo.config>=1.2.0
-six>=1.5.2
+six>=1.6.0
iso8601>=0.1.9
fixtures>=0.3.14
testscenarios>=0.4
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index d1bda83..a65d7b7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -203,6 +203,13 @@
LOG.warn("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?" % volume_id)
+ @classmethod
+ def prepare_instance_network(cls):
+ if (CONF.compute.ssh_auth_method != 'disabled' and
+ CONF.compute.ssh_connect_method == 'floating'):
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
+
class BaseV2ComputeTest(BaseComputeTest):
@@ -312,11 +319,6 @@
@classmethod
def setUpClass(cls):
# By default compute tests do not create network resources
- if cls._interface == "xml":
- skip_msg = ("XML interface is being removed from Nova v3. "
- "%s will be removed shortly" % cls.__name__)
- raise cls.skipException(skip_msg)
-
if not CONF.compute_feature_enabled.api_v3:
skip_msg = ("%s skipped as nova v3 api is not available" %
cls.__name__)
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index f0a8c8d..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -32,6 +32,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersTestJSON, cls).setUpClass()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['adminPass'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -114,26 +108,10 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- cls.client = cls.servers_client
cls.flavor_client = cls.os_adm.flavors_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- accessIPv4=cls.accessIPv4,
- accessIPv6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['adminPass']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+ cls.client = cls.servers_client
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -141,7 +119,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 64
@@ -154,12 +132,12 @@
ram, vcpus, disk,
flavor_with_eph_disk_id,
ephemeral=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -172,18 +150,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 202)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -196,13 +174,18 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_emph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
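A note on the partition check above: RemoteClient.get_partitions() returns the raw text of /proc/partitions, so the fix counts lines rather than characters of that string. A minimal standalone sketch of the counting logic, where the sample outputs are assumptions for illustration only:

    def count_partition_lines(partitions_output):
        # get_partitions() is assumed to return raw /proc/partitions text;
        # every line (header included) adds one to the count, so only the
        # difference between the two servers is meaningful.
        return len(partitions_output.split('\n'))

    sample_without_eph = ("major minor  #blocks  name\n"
                          " 253        0    1048576 vda")
    sample_with_eph = sample_without_eph + "\n 253       16     524288 vdb"

    assert count_partition_lines(sample_with_eph) == \
        count_partition_lines(sample_without_eph) + 1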
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index cc2d1ee..1f2bca9 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -44,6 +44,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServerActionsTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.server_id = cls.rebuild_server(None)
diff --git a/tempest/api/compute/v3/servers/test_attach_volume.py b/tempest/api/compute/v3/servers/test_attach_volume.py
index 28d8517..e994c7f 100644
--- a/tempest/api/compute/v3/servers/test_attach_volume.py
+++ b/tempest/api/compute/v3/servers/test_attach_volume.py
@@ -33,6 +33,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(AttachVolumeV3Test, cls).setUpClass()
cls.device = CONF.compute.volume_device_name
if not CONF.service_available.cinder:
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 80c40a2..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -32,6 +32,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersV3Test, cls).setUpClass()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
@@ -53,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['admin_password'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4,
@@ -115,26 +109,10 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
cls.flavor_client = cls.flavors_admin_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- access_ip_v4=cls.accessIPv4,
- access_ip_v6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['admin_password']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -142,7 +120,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 512
@@ -154,13 +132,13 @@
create_flavor(flavor_with_eph_disk_name,
ram, vcpus, disk,
flavor_with_eph_disk_id,
- ephemeral=1, swap=1024, rxtx=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ ephemeral=1, rxtx=1))
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -173,18 +151,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
- self.assertEqual(resp.status, 202)
+ self.assertEqual(resp.status, 204)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -197,13 +175,17 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
-
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_emph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_emph)
class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index ab1ccc4..1495cb7 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -41,6 +41,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServerActionsV3Test, cls).setUpClass()
cls.client = cls.servers_client
cls.server_id = cls.rebuild_server(None)
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ab9d144..4585912 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -33,6 +33,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(AttachVolumeTestJSON, cls).setUpClass()
cls.device = CONF.compute.volume_device_name
if not CONF.service_available.cinder:
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 84d5be6..fc313f2 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -14,6 +14,7 @@
# limitations under the License.
from tempest import config
+from tempest import exceptions
import tempest.test
@@ -27,46 +28,35 @@
def setUpClass(cls):
super(BaseDataProcessingTest, cls).setUpClass()
if not CONF.service_available.sahara:
- raise cls.skipException("Sahara support is required")
+ raise cls.skipException('Sahara support is required')
os = cls.get_client_manager()
cls.client = os.data_processing_client
- # set some constants
cls.flavor_ref = CONF.compute.flavor_ref
- cls.simple_node_group_template = {
- 'plugin_name': 'vanilla',
- 'hadoop_version': '1.2.1',
- 'node_processes': [
- "datanode",
- "tasktracker"
- ],
- 'flavor_id': cls.flavor_ref,
- 'node_configs': {
- 'HDFS': {
- 'Data Node Heap Size': 1024
- },
- 'MapReduce': {
- 'Task Tracker Heap Size': 1024
- }
- }
- }
# add lists for watched resources
cls._node_group_templates = []
+ cls._cluster_templates = []
@classmethod
def tearDownClass(cls):
- # cleanup node group templates
- for ngt_id in getattr(cls, '_node_group_templates', []):
- try:
- cls.client.delete_node_group_template(ngt_id)
- except Exception:
- # ignore errors while auto removing created resource
- pass
+ cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
+ cls.client.delete_cluster_template)
+ cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
+ cls.client.delete_node_group_template)
cls.clear_isolated_creds()
super(BaseDataProcessingTest, cls).tearDownClass()
+ @staticmethod
+ def cleanup_resources(resource_id_list, method):
+ for resource_id in resource_id_list:
+ try:
+ method(resource_id)
+ except exceptions.NotFound:
+ # ignore errors while auto removing created resource
+ pass
+
@classmethod
def create_node_group_template(cls, name, plugin_name, hadoop_version,
node_processes, flavor_id,
@@ -77,16 +67,32 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
-
resp, body = cls.client.create_node_group_template(name, plugin_name,
hadoop_version,
node_processes,
flavor_id,
node_configs,
**kwargs)
-
# store id of created node group template
- template_id = body['id']
- cls._node_group_templates.append(template_id)
+ cls._node_group_templates.append(body['id'])
- return resp, body, template_id
+ return resp, body
+
+ @classmethod
+ def create_cluster_template(cls, name, plugin_name, hadoop_version,
+ node_groups, cluster_configs=None, **kwargs):
+ """Creates watched cluster template with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object. All resources created in this method will be automatically
+ removed in tearDownClass method.
+ """
+ resp, body = cls.client.create_cluster_template(name, plugin_name,
+ hadoop_version,
+ node_groups,
+ cluster_configs,
+ **kwargs)
+ # store id of created cluster template
+ cls._cluster_templates.append(body['id'])
+
+ return resp, body
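A hypothetical consumer of the new create_cluster_template helper would only call it and let tearDownClass do the cleanup; the test below is an illustrative sketch (class name, node_groups payload and assertion are assumptions, not part of this change):

    from tempest.api.data_processing import base as dp_base
    from tempest.common.utils import data_utils
    from tempest.test import attr


    class ClusterTemplateTest(dp_base.BaseDataProcessingTest):

        @attr(type='smoke')
        def test_cluster_template_create(self):
            template_name = data_utils.rand_name('sahara-cluster-template')
            # minimal made-up node_groups payload
            node_groups = [{'name': 'worker',
                            'flavor_id': self.flavor_ref,
                            'node_processes': ['datanode'],
                            'count': 1}]
            resp, body = self.create_cluster_template(
                template_name, 'vanilla', '1.2.1', node_groups)
            # the template id was recorded by the base class and is removed
            # automatically in tearDownClass via cleanup_resources()
            self.assertEqual(template_name, body['name'])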
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index a64c345..ed4cf1f 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,65 +19,87 @@
class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
- def _create_simple_node_group_template(self, template_name=None):
- """Creates simple Node Group Template with optional name specified.
+ @classmethod
+ def setUpClass(cls):
+ super(NodeGroupTemplateTest, cls).setUpClass()
+ cls.node_group_template = {
+ 'description': 'Test node group template',
+ 'plugin_name': 'vanilla',
+ 'hadoop_version': '1.2.1',
+ 'node_processes': [
+ 'datanode',
+ 'tasktracker'
+ ],
+ 'flavor_id': cls.flavor_ref,
+ 'node_configs': {
+ 'HDFS': {
+ 'Data Node Heap Size': 1024
+ },
+ 'MapReduce': {
+ 'Task Tracker Heap Size': 1024
+ }
+ }
+ }
+
+ def _create_node_group_template(self, template_name=None):
+ """Creates Node Group Template with optional name specified.
It creates template and ensures response status and template name.
Returns id and name of created template.
"""
-
if template_name is None:
# generate random name if it's not specified
- template_name = data_utils.rand_name('sahara')
+ template_name = data_utils.rand_name('sahara-ng-template')
- # create simple node group template
- resp, body, template_id = self.create_node_group_template(
- template_name, **self.simple_node_group_template)
+ # create node group template
+ resp, body = self.create_node_group_template(
+ template_name, **self.node_group_template)
# ensure that template created successfully
self.assertEqual(202, resp.status)
self.assertEqual(template_name, body['name'])
- return template_id, template_name
+ return body['id'], template_name
@attr(type='smoke')
def test_node_group_template_create(self):
- # just create and ensure template
- self._create_simple_node_group_template()
+ template_name = data_utils.rand_name('sahara-ng-template')
+ resp, body = self.create_node_group_template(
+ template_name, **self.node_group_template)
+
+ # check that template created successfully
+ self.assertEqual(resp.status, 202)
+ self.assertEqual(template_name, body['name'])
+ self.assertDictContainsSubset(self.node_group_template, body)
@attr(type='smoke')
def test_node_group_template_list(self):
- template_info = self._create_simple_node_group_template()
+ template_info = self._create_node_group_template()
# check for node group template in list
resp, templates = self.client.list_node_group_templates()
self.assertEqual(200, resp.status)
- templates_info = list([(template['id'], template['name'])
- for template in templates])
+ templates_info = [(template['id'], template['name'])
+ for template in templates]
self.assertIn(template_info, templates_info)
@attr(type='smoke')
def test_node_group_template_get(self):
- template_id, template_name = self._create_simple_node_group_template()
+ template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
resp, template = self.client.get_node_group_template(template_id)
self.assertEqual(200, resp.status)
self.assertEqual(template_name, template['name'])
- self.assertEqual(self.simple_node_group_template['plugin_name'],
- template['plugin_name'])
- self.assertEqual(self.simple_node_group_template['node_processes'],
- template['node_processes'])
- self.assertEqual(self.simple_node_group_template['flavor_id'],
- template['flavor_id'])
+ self.assertDictContainsSubset(self.node_group_template, template)
@attr(type='smoke')
def test_node_group_template_delete(self):
- template_id, template_name = self._create_simple_node_group_template()
+ template_id = self._create_node_group_template()[0]
# delete the node group template by id
- resp = self.client.delete_node_group_template(template_id)
+ resp = self.client.delete_node_group_template(template_id)[0]
- self.assertEqual('204', resp[0]['status'])
+ self.assertEqual(204, resp.status)
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 0647069..555cbda 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -14,9 +14,12 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class FWaaSExtensionTestJSON(base.BaseNetworkTest):
_interface = 'json'
@@ -67,6 +70,20 @@
except exceptions.NotFound:
pass
+ self.client.wait_for_resource_deletion('firewall', fw_id)
+
+ def _wait_for_active(self, fw_id):
+ def _wait():
+ resp, firewall = self.client.show_firewall(fw_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+ return firewall['status'] == 'ACTIVE'
+
+ if not test.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+ raise exceptions.TimeoutException(m)
+
@test.attr(type='smoke')
def test_list_firewall_rules(self):
# List firewall rules
@@ -168,6 +185,15 @@
@test.attr(type='smoke')
def test_create_show_delete_firewall(self):
+ # Create tenant network resources required for an ACTIVE firewall
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ router = self.create_router(
+ data_utils.rand_name('router-'),
+ admin_state_up=True)
+ self.client.add_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
+
# Create firewall
resp, body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
@@ -177,11 +203,16 @@
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
+ self._wait_for_active(firewall_id)
+
# show a created firewall
resp, firewall = self.client.show_firewall(firewall_id)
self.assertEqual('200', resp['status'])
firewall = firewall['firewall']
+
for key, value in firewall.iteritems():
+ if key == 'status':
+ continue
self.assertEqual(created_firewall[key], value)
# list firewall
@@ -198,9 +229,6 @@
# Delete firewall
resp, _ = self.client.delete_firewall(firewall_id)
self.assertEqual('204', resp['status'])
- # Confirm deletion
- # TODO(raies): Confirm deletion can be done only when,
- # deleted firewall status is not "PENDING_DELETE".
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
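The _wait_for_active helper added above leans on tempest.test.call_until_true, which keeps re-invoking a predicate until it returns True or the timeout expires. A rough standalone approximation of that polling loop (not the actual tempest implementation):

    import time


    def call_until_true(predicate, duration, sleep_for):
        # Re-check the predicate until it succeeds or `duration` seconds
        # elapse; returns True on success and False on timeout, which is
        # how _wait_for_active decides whether to raise TimeoutException.
        deadline = time.time() + duration
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(sleep_for)
        return False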
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 29afebc..5a79529 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -122,6 +122,7 @@
{"Quota-Bytes": "100"})
@test.attr(type=["negative", "smoke"])
+ @test.skip_because(bug="1310597")
@test.requires_ext(extension='account_quotas', service='object')
def test_upload_large_object(self):
object_name = data_utils.rand_name(name="TestObject")
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index c27bedf..4e40de9 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,7 +30,7 @@
@classmethod
def setUpClass(cls):
super(BaseOrchestrationTest, cls).setUpClass()
- cls.os = clients.OrchestrationManager()
+ cls.os = clients.Manager()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
cls.build_timeout = CONF.orchestration.build_timeout
@@ -41,6 +41,7 @@
cls.servers_client = cls.os.servers_client
cls.keypairs_client = cls.os.keypairs_client
cls.network_client = cls.os.network_client
+ cls.volumes_client = cls.os.volumes_client
cls.stacks = []
cls.keypairs = []
@@ -136,3 +137,8 @@
return dict((r['resource_name'], r['resource_type'])
for r in resources)
+
+ def get_stack_output(self, stack_identifier, output_key):
+ resp, body = self.client.get_stack(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ return self.stack_output(body, output_key)
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 275d040..63b03f4 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -36,7 +36,6 @@
admin_state_up: false
external_gateway_info:
network: {get_param: ExternalNetworkId}
- enable_snat: false
RouterInterface:
type: OS::Neutron::RouterInterface
properties:
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index b96f6ce..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -137,8 +137,6 @@
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
- self.assertEqual(False,
- router['external_gateway_info']['enable_snat'])
self.assertEqual(False, router['admin_state_up'])
@test.attr(type='slow')
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index cb5d941..4b845b1 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -110,14 +110,6 @@
# wait for create to complete.
self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
# This is an assert of great significance, as it means the following
# has happened:
# - cfn-init read the provided metadata and wrote out a file
@@ -125,5 +117,5 @@
# - a cfn-signal was built which was signed with provided credentials
# - the wait condition was fulfilled and the stack has changed state
wait_status = json.loads(
- self.stack_output(body, 'WaitConditionStatus'))
+ self.get_stack_output(sid, 'WaitConditionStatus'))
self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..a60cd37
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,58 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(CinderResourcesTest, cls).setUpClass()
+ if not CONF.service_available.cinder:
+ raise cls.skipException('Cinder support is required')
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete(self):
+ """Create and delete a volume via OS::Cinder::Volume."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self.assertIsNotNone(volume_id)
+ resp, volume = self.volumes_client.get_volume(volume_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('available', volume.get('status'))
+ self.assertEqual(1, volume.get('size'))
+ self.assertEqual('a descriptive description',
+ volume.get('display_description'))
+
+ # Verify the stack outputs are as expected
+ self.assertEqual('available',
+ self.get_stack_output(stack_identifier, 'status'))
+ self.assertEqual('1',
+ self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual('a descriptive description',
+ self.get_stack_output(stack_identifier,
+ 'display_description'))
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
self.volume['id'])
self.assertEqual('error', volume_get['status'])
- @test.attr(type='gate')
- def test_volume_begin_detaching(self):
- # test volume begin detaching : available -> detaching -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('detaching', volume_get['status'])
-
- @test.attr(type='gate')
- def test_volume_roll_detaching(self):
- # test volume roll detaching : detaching -> in-use -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp, body = self.client.volume_roll_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('in-use', volume_get['status'])
-
def test_volume_force_delete_when_volume_is_creating(self):
# test force delete when status of volume is creating
self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_agents = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'agents': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'agent_id': {'type': ['integer', 'string']},
+ 'hypervisor': {'type': 'string'},
+ 'os': {'type': 'string'},
+ 'architecture': {'type': 'string'},
+ 'version': {'type': 'string'},
+ 'url': {'type': 'string', 'format': 'uri'},
+ 'md5hash': {'type': 'string'}
+ },
+ 'required': ['agent_id', 'hypervisor', 'os',
+ 'architecture', 'version', 'url', 'md5hash']
+ }
+ }
+ },
+ 'required': ['agents']
+ }
+}
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+_common_schema = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'certificate': {
+ 'type': 'object',
+ 'properties': {
+ 'data': {'type': 'string'},
+ 'private_key': {'type': 'string'},
+ },
+ 'required': ['data', 'private_key'],
+ }
+ },
+ 'required': ['certificate'],
+ }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+ 'properties']['private_key'].update({'type': 'null'})
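The deep copy here matters: get_certificate reuses _common_schema but then mutates a nested property, and a shallow copy would leak that change back into the shared definition. A small self-contained illustration of the pattern (the schema fragment is made up):

    import copy

    base = {'response_body': {
        'properties': {'private_key': {'type': 'string'}}}}

    v2 = copy.deepcopy(base)
    v2['response_body']['properties']['private_key'].update({'type': 'null'})

    # the shared schema is untouched; a shallow copy would not guarantee that
    assert base['response_body']['properties']['private_key']['type'] == 'string'
    assert v2['response_body']['properties']['private_key']['type'] == 'null'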
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index a273abb..31cd56b 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
get_password = {
'status_code': [200],
'response_body': {
@@ -47,3 +49,16 @@
delete_server = {
'status_code': [204],
}
+
+set_server_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': {'type': 'object'}
+ },
+ 'required': ['metadata']
+ }
+}
+
+list_server_metadata = copy.deepcopy(set_server_metadata)
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..de3e12b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_aggregate = {
+ 'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fad6b56..e97752d 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
from tempest.api_schema.compute import parameter_types
+common_image_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'minDisk': {'type': 'integer'},
+ 'minRam': {'type': 'integer'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but here
+ # allows 'string' also because we will be able to
+ # change it to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+ },
+ # 'server' attributes only come in the response body if the image
+ # is associated with a server. 'OS-EXT-IMG-SIZE:size' is an API
+ # extension, so those are not defined as 'required'.
+ 'required': ['id', 'status', 'updated', 'links', 'name',
+ 'created', 'minDisk', 'minRam', 'progress',
+ 'metadata']
+}
+
get_image = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'status': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'links': parameter_types.links,
- 'name': {'type': 'string'},
- 'created': {'type': 'string'},
- 'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
- 'minDisk': {'type': 'integer'},
- 'minRam': {'type': 'integer'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'server': {
- 'type': 'object',
- 'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- }
- },
- # 'server' attributes only comes in response body if image is
- # associated with any server. So it is not 'required'
- 'required': ['id', 'status', 'updated', 'links', 'name',
- 'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
- 'minRam', 'progress', 'metadata']
- }
+ 'image': common_image_schema
},
'required': ['image']
}
@@ -67,20 +70,7 @@
'type': 'object',
'properties': {
'id': {'type': 'string'},
- 'links': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'href': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'rel': {'type': 'string'}
- },
- 'required': ['href', 'rel']
- }
- },
+ 'links': parameter_types.links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
@@ -120,3 +110,17 @@
'required': ['meta']
}
}
+
+list_images_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'images': {
+ 'type': 'array',
+ 'items': common_image_schema
+ }
+ },
+ 'required': ['images']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 6dd44cd..8b4bead 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
# License for the specific language governing permissions and limitations
# under the License.
+common_security_group_rule = {
+ 'from_port': {'type': ['integer', 'null']},
+ 'to_port': {'type': ['integer', 'null']},
+ 'group': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ }
+ },
+ 'ip_protocol': {'type': ['string', 'null']},
+ # 'parent_group_id' can be a UUID, so it is defined as 'string' also.
+ 'parent_group_id': {'type': ['string', 'integer', 'null']},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ }
+ # When an optional argument such as 'group_id' is provided
+ # in the request body, the 'cidr' attribute does not come
+ # back in the response body. So it is not 'required'.
+ },
+ 'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'rules': {
+ 'type': 'array',
+ 'items': {
+ 'type': ['object', 'null'],
+ 'properties': common_security_group_rule
+ }
+ },
+ 'description': {'type': 'string'},
+ },
+ 'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
list_security_groups = {
'status_code': [200],
'response_body': {
@@ -19,24 +62,24 @@
'properties': {
'security_groups': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': ['integer', 'string']},
- 'name': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'rules': {'type': 'array'},
- 'description': {'type': 'string'},
- },
- 'required': ['id', 'name', 'tenant_id', 'rules',
- 'description'],
- }
+ 'items': common_security_group
}
},
'required': ['security_groups']
}
}
+get_security_group = create_security_group = update_security_group = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group': common_security_group
+ },
+ 'required': ['security_group']
+ }
+}
+
create_security_group_rule = {
'status_code': [200],
'response_body': {
@@ -44,25 +87,7 @@
'properties': {
'security_group_rule': {
'type': 'object',
- 'properties': {
- 'from_port': {'type': 'integer'},
- 'to_port': {'type': 'integer'},
- 'group': {'type': 'object'},
- 'ip_protocol': {'type': 'string'},
- # 'parent_group_id' can be UUID so defining it
- # as 'string' also.
- 'parent_group_id': {'type': ['integer', 'string']},
- 'id': {'type': ['integer', 'string']},
- 'ip_range': {
- 'type': 'object',
- 'properties': {
- 'cidr': {'type': 'string'}
- }
- # When optional argument is provided in request body
- # like 'group_id' then, attribute 'cidr' does not
- # comes in response body. So it is not 'required'.
- }
- },
+ 'properties': common_security_group_rule,
'required': ['from_port', 'to_port', 'group', 'ip_protocol',
'parent_group_id', 'id', 'ip_range']
}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..358e455
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_aggregate = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..ac8cbd1 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
:param client_type: 'tempest' or 'official'
:param interface: 'json' or 'xml'. Applicable for tempest client only
"""
+ credentials = self._convert_credentials(credentials)
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
- self.credentials = credentials
self.client_type = client_type
self.interface = interface
if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
self.alt_auth_data = None
self.alt_part = None
+ def _convert_credentials(self, credentials):
+ # Support dict credentials for backwards compatibility
+ if isinstance(credentials, dict):
+ return get_credentials(**credentials)
+ else:
+ return credentials
+
def __str__(self):
return "Creds :{creds}, client type: {client_type}, interface: " \
"{interface}, cached auth data: {cache}".format(
@@ -76,9 +83,9 @@
@classmethod
def check_credentials(cls, credentials):
"""
- Verify credentials are valid. Subclasses can do a better check.
+ Verify credentials are valid.
"""
- return isinstance(credentials, dict)
+ return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
@@ -218,16 +225,6 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV2AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -240,9 +237,9 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
auth_data=True)
else:
raise NotImplementedError
@@ -303,12 +300,15 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+ def _convert_credentials(self, credentials):
+ # For V3 do not convert as V3 Credentials are not defined yet
+ return credentials
+
@classmethod
def check_credentials(cls, credentials, scoped=True):
# tenant_name is optional if not scoped
- valid = super(KeystoneV3AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials and 'domain_name' in credentials
+ valid = 'username' in credentials and 'password' in credentials \
+ and 'domain_name' in credentials
if scoped:
valid = valid and 'tenant_name' in credentials
return valid
@@ -327,7 +327,7 @@
return dict(
user=self.credentials['username'],
password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ tenant=self.credentials['tenant_name'],
domain=self.credentials['domain_name'],
auth_data=True)
else:
@@ -398,3 +398,144 @@
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
+
+
+def get_credentials(credential_type=None, **kwargs):
+ """
+ Builds a credentials object based on the configured auth_version
+
+ :param credential_type (string): requests credentials from the tempest
+ configuration file. Valid values are defined in
+ Credentials.TYPES.
+ :param kwargs (dict): taken into account only if credential_type is
+ not specified or None. Dict of credential key/value pairs
+
+ Examples:
+
+ Returns credentials from the provided parameters:
+ >>> get_credentials(username='foo', password='bar')
+
+ Returns credentials from tempest configuration:
+ >>> get_credentials(credential_type='user')
+ """
+ if CONF.identity.auth_version == 'v2':
+ credential_class = KeystoneV2Credentials
+ else:
+ raise exceptions.InvalidConfiguration('Unsupported auth version')
+ if credential_type is not None:
+ creds = credential_class.get_default(credential_type)
+ else:
+ creds = credential_class(**kwargs)
+ return creds
+
+
+class Credentials(object):
+ """
+ Set of credentials for accessing OpenStack services
+
+ ATTRIBUTES: list of valid class attributes representing credentials.
+
+ TYPES: types of credentials available in the configuration file.
+ For each key there's a tuple (section, prefix) to match the
+ configuration options.
+ """
+
+ ATTRIBUTES = []
+ TYPES = {
+ 'identity_admin': ('identity', 'admin'),
+ 'compute_admin': ('compute_admin', None),
+ 'user': ('identity', None),
+ 'alt_user': ('identity', 'alt')
+ }
+
+ def __init__(self, **kwargs):
+ """
+ Enforce the available attributes at init time (only).
+ Additional attributes can still be set afterwards if tests need
+ to do so.
+ """
+ self._apply_credentials(kwargs)
+
+ def _apply_credentials(self, attr):
+ for key in attr.keys():
+ if key in self.ATTRIBUTES:
+ setattr(self, key, attr[key])
+ else:
+ raise exceptions.InvalidCredentials
+
+ def __str__(self):
+ """
+ Represent only attributes included in self.ATTRIBUTES
+ """
+ _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+ return str(_repr)
+
+ def __eq__(self, other):
+ """
+ Credentials are equal if attributes in self.ATTRIBUTES are equal
+ """
+ return str(self) == str(other)
+
+ def __getattr__(self, key):
+ # If an attribute is set, __getattr__ is not invoked
+ # If an attribute is not set, and it is a known one, return None
+ if key in self.ATTRIBUTES:
+ return None
+ else:
+ raise AttributeError
+
+ def __delitem__(self, key):
+ # For backwards compatibility, support dict behaviour
+ if key in self.ATTRIBUTES:
+ delattr(self, key)
+ else:
+ raise AttributeError
+
+ def get(self, item, default):
+ # In this patch, act as a dict for backwards compatibility
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+ @classmethod
+ def get_default(cls, credentials_type):
+ if credentials_type not in cls.TYPES:
+ raise exceptions.InvalidCredentials()
+ creds = cls._get_default(credentials_type)
+ if not creds.is_valid():
+ raise exceptions.InvalidConfiguration()
+ return creds
+
+ @classmethod
+ def _get_default(cls, credentials_type):
+ raise NotImplementedError
+
+ def is_valid(self):
+ raise NotImplementedError
+
+
+class KeystoneV2Credentials(Credentials):
+
+ CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+ ATTRIBUTES = ['user_id', 'tenant_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ @classmethod
+ def _get_default(cls, credentials_type='user'):
+ params = {}
+ section, prefix = cls.TYPES[credentials_type]
+ for attr in cls.CONF_ATTRIBUTES:
+ _section = getattr(CONF, section)
+ if prefix is None:
+ params[attr] = getattr(_section, attr)
+ else:
+ params[attr] = getattr(_section, prefix + "_" + attr)
+ return KeystoneV2Credentials(**params)
+
+ def is_valid(self):
+ """
+ The minimum set of valid credentials is username and password.
+ Tenant is optional.
+ """
+ return None not in (self.username, self.password)
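A quick illustration of how the new credentials objects behave; the values are placeholders and the last call needs a configured tempest environment since it reads tempest.conf:

    from tempest import auth

    # build credentials directly from keyword arguments
    creds = auth.KeystoneV2Credentials(username='demo', password='secret',
                                       tenant_name='demo')
    assert creds.is_valid()
    # known-but-unset attributes resolve to None instead of raising
    assert creds.tenant_id is None

    # or pull a predefined set from tempest.conf (e.g. the admin user)
    admin_creds = auth.get_credentials(credential_type='identity_admin')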
diff --git a/tempest/clients.py b/tempest/clients.py
index 0ebbd7c..2fe4c95 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -432,24 +432,6 @@
service=service)
-class OrchestrationManager(Manager):
- """
- Manager object that uses the admin credentials for its
- so that heat templates can create users
- """
- def __init__(self, interface='json', service=None):
- base = super(OrchestrationManager, self)
- # heat currently needs an admin user so that stacks can create users
- # however the tests need the demo tenant so that the neutron
- # private network is the default. DO NOT change this auth combination
- # until heat can run with the demo user.
- base.__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.tenant_name,
- interface=interface,
- service=service)
-
-
class OfficialClientManager(manager.Manager):
"""
Manager that provides access to the official python clients for
diff --git a/tempest/config.py b/tempest/config.py
index b9fe572..7084768 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -159,6 +159,19 @@
cfg.BoolOpt('run_ssh',
default=False,
help="Should the tests ssh to instances?"),
+ cfg.StrOpt('ssh_auth_method',
+ default='keypair',
+ help="Auth method used to authenticate to the instance. "
+ "Valid choices are: keypair, configured, adminpass, "
+ "disabled. keypair: start the servers with an ssh "
+ "keypair. configured: use the configured user and "
+ "password. adminpass: use the injected adminPass. "
+ "disabled: avoid using ssh when it is an option."),
+ cfg.StrOpt('ssh_connect_method',
+ default='fixed',
+ help="How to connect to the instance? "
+ "fixed: use the first ip belonging to the fixed network. "
+ "floating: create and use a floating ip."),
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 270851d..f297f22 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import re
@@ -22,7 +23,7 @@
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
@@ -75,8 +76,32 @@
return 0, "T106: Don't put vi configuration in source files"
+def service_tags_not_in_module_path(physical_line, filename):
+ """Check that a service tag isn't in the module path
+
+ A service tag should only be added if the service name isn't already in
+ the module path.
+
+ T107
+ """
+ # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+ # created for services like heat which would cause false negatives for
+ # those tests, so just exclude the scenario tests.
+ if 'tempest/scenario' not in filename:
+ matches = SCENARIO_DECORATOR.match(physical_line)
+ if matches:
+ services = matches.group(1).split(',')
+ for service in services:
+ service_name = service.strip().strip("'")
+ modulepath = os.path.split(filename)[0]
+ if service_name in modulepath:
+ return (physical_line.find(service_name),
+ "T107: service tag should not be in path")
+
+
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
register(no_setupclass_for_unit_tests)
register(no_vi_headers)
+ register(service_tags_not_in_module_path)
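As an example of what the new T107 check catches, a services decorator that repeats a service already present in the module path would be flagged; the file path below is hypothetical:

    from tempest.hacking import checks

    line = "    @test.services('compute')"
    filename = 'tempest/api/compute/servers/test_foo.py'

    # returns (column, "T107: service tag should not be in path") because
    # 'compute' already appears in the module path; scenario tests are
    # exempt, so a path under tempest/scenario would return None.
    print(checks.service_tags_not_in_module_path(line, filename))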
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..0210c56 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
Test large operations.
This test below:
- * Spin up multiple instances in one nova call
+ * Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
@@ -69,3 +69,5 @@
return
self.glance_image_create()
self.nova_boot()
+ self.nova_boot()
+ self.nova_boot()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5f71461..db1badd 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -118,7 +118,7 @@
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
- self.server_ips[server.id] = server.networks[net.name][0]
+ self.server_ips[server.id] = server.networks[net['name']][0]
self.assertTrue(self.servers_keypairs)
return server
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b9ee040..b1b06cc 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -422,11 +422,15 @@
access_point_ssh = self._connect_to_access_point(tenant)
mac_addr = access_point_ssh.get_mac_address()
mac_addr = mac_addr.strip().lower()
- port_list = self.network_client.list_ports()['ports']
+ # Get the fixed_ips and mac_address fields of all ports. Select
+ # only those two columns to reduce the size of the response.
+ port_list = self.network_client.list_ports(
+ fields=['fixed_ips', 'mac_address'])['ports']
port_detail_list = [
(port['fixed_ips'][0]['subnet_id'],
port['fixed_ips'][0]['ip_address'],
- port['mac_address'].lower()) for port in port_list
+ port['mac_address'].lower())
+ for port in port_list if port['fixed_ips']
]
server_ip = self._get_server_ip(tenant.access_point)
subnet_id = tenant.subnet.id
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
index 19821e7..98d8896 100644
--- a/tempest/services/compute/json/agents_client.py
+++ b/tempest/services/compute/json/agents_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import agents as common_schema
from tempest.api_schema.compute.v2 import agents as schema
from tempest.common import rest_client
from tempest import config
@@ -37,7 +38,9 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, json.loads(body).get('agents')
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
def create_agent(self, **kwargs):
"""Create an agent build."""
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 54d1252..5c0b5d3 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -66,7 +67,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v2_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v2schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index bd39a04..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -70,6 +70,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_images_details, resp, body)
return resp, body['images']
def get_image(self, image_id):
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 7411fb7..c19baf3 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
post_body = json.dumps({'security_group': post_body})
resp, body = self.post('os-security-groups', post_body)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def update_security_group(self, security_group_id, name=None,
@@ -81,6 +83,7 @@
resp, body = self.put('os-security-groups/%s' % str(security_group_id),
post_body)
body = json.loads(body)
+ self.validate_response(schema.update_security_group, resp, body)
return resp, body['security_group']
def delete_security_group(self, security_group_id):
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 70a950a..82ded4c 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -276,6 +276,7 @@
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -286,6 +287,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index e1c286c..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import agents as common_schema
from tempest.api_schema.compute.v3 import agents as schema
from tempest.common import rest_client
from tempest import config
@@ -34,7 +35,9 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
def create_agent(self, **kwargs):
"""Create an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 0fc6af9..2487ee7 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -66,7 +67,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v3_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v3schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index bbffc13..67232a8 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -284,6 +284,7 @@
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -294,6 +295,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index e96b44b..c7b5f93 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -32,7 +32,6 @@
It returns pair: resp and parsed resource(s) body.
"""
-
resp, body = req_fun(uri, headers={
'Content-Type': 'application/json'
}, *args, **kwargs)
@@ -48,7 +47,7 @@
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
- uri = "node-group-templates/%s" % tmpl_id
+ uri = 'node-group-templates/%s' % tmpl_id
return self._request_and_parse(self.get, uri, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
@@ -59,7 +58,7 @@
It supports passing additional params using kwargs and returns created
object.
"""
- uri = "node-group-templates"
+ uri = 'node-group-templates'
body = kwargs.copy()
body.update({
'name': name,
@@ -75,7 +74,7 @@
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
- uri = "node-group-templates/%s" % tmpl_id
+ uri = 'node-group-templates/%s' % tmpl_id
return self.delete(uri)
def list_plugins(self):
@@ -87,7 +86,45 @@
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
- uri = "plugins/%s" % plugin_name
+ uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
return self._request_and_parse(self.get, uri, 'plugin')
+
+ def list_cluster_templates(self):
+ """List all cluster templates for a user."""
+
+ uri = 'cluster-templates'
+ return self._request_and_parse(self.get, uri, 'cluster_templates')
+
+ def get_cluster_template(self, tmpl_id):
+ """Returns the details of a single cluster template."""
+
+ uri = 'cluster-templates/%s' % tmpl_id
+ return self._request_and_parse(self.get, uri, 'cluster_template')
+
+ def create_cluster_template(self, name, plugin_name, hadoop_version,
+ node_groups, cluster_configs=None,
+ **kwargs):
+ """Creates cluster template with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object.
+ """
+ uri = 'cluster-templates'
+ body = kwargs.copy()
+ body.update({
+ 'name': name,
+ 'plugin_name': plugin_name,
+ 'hadoop_version': hadoop_version,
+ 'node_groups': node_groups,
+ 'cluster_configs': cluster_configs or dict(),
+ })
+ return self._request_and_parse(self.post, uri, 'cluster_template',
+ body=json.dumps(body))
+
+ def delete_cluster_template(self, tmpl_id):
+ """Deletes the specified cluster template by id."""
+
+ uri = 'cluster-templates/%s' % tmpl_id
+ return self.delete(uri)
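A hedged usage sketch of the new cluster-template calls; the plugin name, Hadoop version and node-group layout are placeholder values rather than anything this patch defines, and `client` is assumed to be a data processing client instance from a test class:

    # Placeholder values throughout; 'client' stands in for the data
    # processing service client used by the API tests.
    resp, template = client.create_cluster_template(
        name='example-cluster-template',
        plugin_name='vanilla',
        hadoop_version='1.2.1',
        node_groups=[{'name': 'worker',
                      'flavor_id': '2',
                      'count': 1,
                      'node_processes': ['datanode', 'tasktracker']}])
    resp, fetched = client.get_cluster_template(template['id'])
    resp, templates = client.list_cluster_templates()
    client.delete_cluster_template(template['id'])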
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+ def _create_keypair(self):
+ keyname = data_utils.rand_name("key")
+ resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+ assert(resp.status == 200)
+
+ def _delete_keypair(self):
+ resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+ assert(resp.status == 202)
+
+ def _create_vm(self):
+ self.name = name = data_utils.rand_name("instance")
+ servers_client = self.manager.servers_client
+ self.logger.info("creating %s" % name)
+ vm_args = self.vm_extra_args.copy()
+ vm_args['security_groups'] = [self.sec_grp]
+ vm_args['key_name'] = self.key['name']
+ resp, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
+ self.server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
+
+ def _destroy_vm(self):
+ self.logger.info("deleting server: %s" % self.server_id)
+ resp, _ = self.manager.servers_client.delete_server(self.server_id)
+ assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.wait_for_server_termination(self.server_id)
+ self.logger.info("deleted server: %s" % self.server_id)
+
+ def _create_sec_group(self):
+ sec_grp_cli = self.manager.security_groups_client
+ s_name = data_utils.rand_name('sec_grp-')
+ s_description = data_utils.rand_name('desc-')
+ _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
+ create_rule = sec_grp_cli.create_security_group_rule
+ create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+ create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+ def _destroy_sec_grp(self):
+ sec_grp_cli = self.manager.security_groups_client
+ sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+ def _create_floating_ip(self):
+ floating_cli = self.manager.floating_ips_client
+ _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+ def _destroy_floating_ip(self):
+ cli = self.manager.floating_ips_client
+ cli.delete_floating_ip(self.floating['id'])
+ cli.wait_for_resource_deletion(self.floating['id'])
+ self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+ def _create_volume(self):
+ name = data_utils.rand_name("volume")
+ self.logger.info("creating volume: %s" % name)
+ volumes_client = self.manager.volumes_client
+ resp, self.volume = volumes_client.create_volume(size=1,
+                                                         display_name=name)
+ assert(resp.status == 200)
+ volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ self.logger.info("created volume: %s" % self.volume['id'])
+
+ def _delete_volume(self):
+ self.logger.info("deleting volume: %s" % self.volume['id'])
+ volumes_client = self.manager.volumes_client
+ resp, _ = volumes_client.delete_volume(self.volume['id'])
+ assert(resp.status == 202)
+ volumes_client.wait_for_resource_deletion(self.volume['id'])
+ self.logger.info("deleted volume: %s" % self.volume['id'])
+
+ def _wait_disassociate(self):
+ cli = self.manager.floating_ips_client
+
+ def func():
+ _, floating = cli.get_floating_ip_details(self.floating['id'])
+ return floating['instance_id'] is None
+
+ if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise RuntimeError("IP disassociate timeout!")
+
+ def new_server_ops(self):
+ self._create_vm()
+ cli = self.manager.floating_ips_client
+ cli.associate_floating_ip_to_server(self.floating['ip'],
+ self.server_id)
+ if self.ssh_test_before_attach and self.enable_ssh_verify:
+ self.logger.info("Scanning for block devices via ssh on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+
+ def setUp(self, **kwargs):
+ """Note able configuration combinations:
+ Closest options to the test_stamp_pattern:
+ new_server = True
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = False
+ Just attaching:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ Mostly API load by repeated attachment:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = False
+ ssh_test_before_attach = False
+ Minimal Nova load, but cinder load not decreased:
+ new_server = False
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ """
+ self.image = CONF.compute.image_ref
+ self.flavor = CONF.compute.flavor_ref
+ self.vm_extra_args = kwargs.get('vm_extra_args', {})
+ self.floating_pool = kwargs.get('floating_pool', None)
+ self.new_volume = kwargs.get('new_volume', True)
+ self.new_server = kwargs.get('new_server', False)
+ self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+ self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+ False)
+ self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+ self.detach_match_count = kwargs.get('detach_match_count', 1)
+ self.attach_match_count = kwargs.get('attach_match_count', 2)
+ self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+ self._create_floating_ip()
+ self._create_sec_group()
+ self._create_keypair()
+ private_key = self.key['private_key']
+ username = CONF.compute.image_ssh_user
+ self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+ username,
+ pkey=private_key)
+ if not self.new_volume:
+ self._create_volume()
+ if not self.new_server:
+ self.new_server_ops()
+
+    # now we just check that the number of partitions changed as expected
+ def part_wait(self, num_match):
+ def _part_state():
+ self.partitions = self.remote_client.get_partitions().split('\n')
+ matching = 0
+ for part_line in self.partitions[1:]:
+ if self.part_line_re.match(part_line):
+ matching += 1
+ return matching == num_match
+ if tempest.test.call_until_true(_part_state,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ return
+ else:
+ raise RuntimeError("Unexpected partitions: %s",
+ str(self.partitions))
+
+ def run(self):
+ if self.new_server:
+ self.new_server_ops()
+ if self.new_volume:
+ self._create_volume()
+ servers_client = self.manager.servers_client
+ self.logger.info("attach volume (%s) to vm %s" %
+ (self.volume['id'], self.server_id))
+ resp, body = servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'in-use')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for new block device on %s"
+ % self.server_id)
+ self.part_wait(self.attach_match_count)
+
+ resp, body = servers_client.detach_volume(self.server_id,
+ self.volume['id'])
+ assert(resp.status == 202)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for block device disapperance on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+ if self.new_volume:
+ self._delete_volume()
+ if self.new_server:
+ self._destroy_vm()
+
+ def tearDown(self):
+ cli = self.manager.floating_ips_client
+ cli.disassociate_floating_ip_from_server(self.floating['ip'],
+ self.server_id)
+ self._wait_disassociate()
+ if not self.new_server:
+ self._destroy_vm()
+ self._delete_keypair()
+ self._destroy_floating_ip()
+ self._destroy_sec_grp()
+ if not self.new_volume:
+ self._delete_volume()
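Both part_wait() and _wait_disassociate() above are built on tempest.test.call_until_true(); a minimal standalone sketch of that polling pattern, with a trivial predicate standing in for the real partition and floating-IP checks:

    # Minimal sketch of the call_until_true() polling pattern; the predicate
    # is a trivial stand-in for the real partition / floating-IP checks.
    import time

    import tempest.test

    start = time.time()

    def _ready():
        # Real callers re-query the resource here and return True once the
        # desired state has been reached.
        return time.time() - start > 2

    # Poll _ready every second for up to 10 seconds; call_until_true returns
    # True as soon as the predicate does, or False on timeout.
    if not tempest.test.call_until_true(_ready, 10, 1):
        raise RuntimeError("timed out waiting for the desired state")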
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..25ae27f 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -80,11 +80,16 @@
return ret
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
- terminate_all_processes()
+ for process in processes:
+ if (not process['process'].is_alive() and
+ process['process'].exitcode != 0):
+ signal.signal(signalnum, signal.SIG_DFL)
+ terminate_all_processes()
+ break
def terminate_all_processes(check_interval=20):
@@ -202,6 +207,8 @@
had_errors = True
break
+ if stop_on_error:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
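The driver change amounts to: only tear everything down when a child has actually exited non-zero, and restore the default SIGCHLD handler first so that terminating the remaining workers cannot re-enter the handler. A hedged standalone sketch of that reset-before-terminate pattern, where `workers` stands in for the driver's global process list:

    # Reset-before-terminate sketch; 'workers' is a stand-in for the stress
    # driver's global 'processes' list.
    import multiprocessing
    import signal
    import time

    workers = []

    def _sigchld(signalnum, frame):
        for worker in workers:
            if not worker.is_alive() and worker.exitcode != 0:
                # Restore the default handler so terminating the remaining
                # children does not re-trigger this handler.
                signal.signal(signalnum, signal.SIG_DFL)
                for w in workers:
                    if w.is_alive():
                        w.terminate()
                break

    signal.signal(signal.SIGCHLD, _sigchld)
    workers.append(multiprocessing.Process(target=time.sleep, args=(1,)))
    workers[0].start()
    workers[0].join()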
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+ "threads": 1,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"vm_extra_args": {},
+ "new_volume": true,
+ "new_server": false,
+ "ssh_test_before_attach": false,
+ "enable_ssh_verify": true}
+}
+]
diff --git a/tempest/test.py b/tempest/test.py
index 8df405c..254fffa 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -75,12 +75,16 @@
try:
f(cls)
except Exception as se:
+ etype, value, trace = sys.exc_info()
LOG.exception("setUpClass failed: %s" % se)
try:
cls.tearDownClass()
except Exception as te:
LOG.exception("tearDownClass failed: %s" % te)
- raise se
+ try:
+ raise etype(value), None, trace
+ finally:
+ del trace # for avoiding circular refs
return decorator
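The point of the change above is that a failure in setUpClass still surfaces with its original traceback even after tearDownClass has been attempted. A hedged Python 2 sketch of the general re-raise-with-original-traceback pattern (the helper names are illustrative, and the plain three-argument raise is shown rather than the exact wrapping the patch uses):

    # Illustrative pattern: capture sys.exc_info() before cleanup runs, then
    # re-raise with the saved traceback.
    import sys

    def might_fail():
        raise ValueError("setUpClass stand-in failure")

    def cleanup():
        pass  # stand-in for tearDownClass()

    try:
        might_fail()
    except Exception:
        etype, value, trace = sys.exc_info()
        try:
            cleanup()  # even if cleanup fails, the original error should win
        except Exception:
            pass
        try:
            raise etype, value, trace
        finally:
            del trace  # drop the traceback reference to avoid circular refs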
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..ddffb4a 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.tests import fake_credentials
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ return fake_credentials.FakeCredentials()
+
class FakeAuthProvider(object):
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
self.conf.set_default('lock_path',
str(os.environ.get('OS_TEST_LOCK_PATH')))
+ self.conf.set_default('auth_version', 'v2', group='identity')
+ for config_option in ['username', 'password', 'tenant_name']:
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ self.conf.set_default(prefix + config_option,
+ 'fake_' + config_option,
+ group='identity')
+ # Compute Admin group items
+ self.conf.set_default(config_option, 'fake_' + config_option,
+ group='compute-admin')
class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..a372973
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+ def is_valid(self):
+ return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ tenant_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV2Credentials, self).__init__(**creds)
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..7b5b4d6 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -22,18 +22,16 @@
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
+from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
+from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
class BaseAuthTestsSetUp(base.TestCase):
_auth_provider_class = None
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant'
- }
+ credentials = fake_credentials.FakeCredentials()
def _auth(self, credentials, **params):
"""
@@ -47,6 +45,8 @@
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.stubs.Set(auth, 'get_credentials',
+ fake_auth_provider.get_credentials)
self.auth_provider = self._auth(self.credentials)
@@ -58,12 +58,19 @@
"""
_auth_provider_class = auth.AuthProvider
- def test_check_credentials_is_dict(self):
- self.assertTrue(self.auth_provider.check_credentials({}))
+ def test_check_credentials_class(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.check_credentials,
+ auth.Credentials())
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
+ def test_instantiate_with_dict(self):
+ # Dict credentials are only supported for backward compatibility
+ auth_provider = self._auth(credentials={})
+ self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
def test_instantiate_with_bad_credentials_type(self):
"""
Assure that credentials with bad type fail with TypeError
@@ -104,6 +111,7 @@
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
_endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
_auth_provider_class = auth.KeystoneV2AuthProvider
+ credentials = fake_credentials.FakeKeystoneV2Credentials()
def setUp(self):
super(TestKeystoneV2AuthProvider, self).setUp()
@@ -210,17 +218,6 @@
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred))
- def test_check_credentials_not_scoped_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertTrue(self.auth_provider.check_credentials(cred,
- scoped=False))
-
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
-
def _test_base_url_helper(self, expected_url, filters,
auth_data=None):
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..86600fa
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,112 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+ attributes = {}
+ credentials_class = auth.Credentials
+
+ def _get_credentials(self, attributes=None):
+ if attributes is None:
+ attributes = self.attributes
+ return self.credentials_class(**attributes)
+
+ def setUp(self):
+ super(CredentialsTests, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_create_invalid_attr(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ self._get_credentials,
+ attributes=dict(invalid='fake'))
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ self.assertRaises(NotImplementedError,
+ self.credentials_class.get_default,
+ credentials_type=ctype)
+
+ def test_invalid_default(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ auth.Credentials.get_default,
+ credentials_type='invalid_type')
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'tenant_name': 'fake_tenant_name'
+ }
+
+ identity_response = fake_identity._fake_v2_response
+ credentials_class = auth.KeystoneV2Credentials
+
+ def setUp(self):
+ super(KeystoneV2CredentialsTests, self).setUp()
+ self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def _verify_credentials(self, credentials_class, creds_dict):
+ creds = auth.get_credentials(**creds_dict)
+ # Check the right version of credentials has been returned
+ self.assertIsInstance(creds, credentials_class)
+        # Check the id attributes are not filled in
+ attributes = [x for x in creds.ATTRIBUTES if (
+ '_id' in x and x != 'domain_id')]
+ for attr in attributes:
+ self.assertIsNone(getattr(creds, attr))
+
+ def test_get_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(self.credentials_class, self.attributes)
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertTrue(creds.is_valid())
+
+ def test_is_not_valid(self):
+ creds = self._get_credentials()
+ for attr in self.attributes.keys():
+ delattr(creds, attr)
+            self.assertFalse(creds.is_valid(),
+                             "Credentials should be invalid without %s" % attr)
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ # Default configuration values related to credentials
+ # are defined as fake_* in fake_config.py
+ self.assertEqual(getattr(creds, attr), 'fake_' + attr)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index e8610d3..33b8d6e 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -218,10 +218,8 @@
else:
self.assertNotEqual(instance.state, "running")
- # NOTE(afazekas): doctored test case,
- # with normal validation it would fail
@test.attr(type='smoke')
- def test_integration_1(self):
+ def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
sec_group_name = data_utils.rand_name("securitygroup-")
@@ -249,14 +247,20 @@
instance_type=self.instance_type,
key_name=self.keypair_name,
security_groups=(sec_group_name,))
+
+ LOG.debug("Instance booted - state: %s",
+ reservation.instances[0].state)
+
self.addResourceCleanUp(self.destroy_reservation,
reservation)
volume = self.ec2_client.create_volume(1, self.zone)
+ LOG.debug("Volume created - status: %s", volume.status)
+
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
- LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
+ LOG.debug("Instance now running - state: %s", instance.state)
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
@@ -284,10 +288,21 @@
volume.attach(instance.id, "/dev/vdh")
def _volume_state():
+ """Return volume state realizing that 'in-use' is overloaded."""
volume.update(validate=True)
- return volume.status
+ status = volume.status
+ attached = volume.attach_data.status
+ LOG.debug("Volume %s is in status: %s, attach_status: %s",
+ volume.id, status, attached)
+ # Nova reports 'in-use' on 'attaching' volumes because we
+ # have a single volume status, and EC2 has 2. Ensure that
+ # if we aren't attached yet we return something other than
+ # 'in-use'
+ if status == 'in-use' and attached != 'attached':
+ return 'attaching'
+ else:
+ return status
- self.assertVolumeStatusWait(_volume_state, "in-use")
wait.re_search_wait(_volume_state, "in-use")
# NOTE(afazekas): Different Hypervisor backends names
@@ -296,6 +311,7 @@
def _part_state():
current = ssh.get_partitions().split('\n')
+ LOG.debug("Partition map for instance: %s", current)
if current > part_lines:
return 'INCREASE'
if current < part_lines:
@@ -311,7 +327,6 @@
self.assertVolumeStatusWait(_volume_state, "available")
wait.re_search_wait(_volume_state, "available")
- LOG.info("Volume %s state: %s", volume.id, volume.status)
wait.state_wait(_part_state, 'DECREASE')
@@ -323,7 +338,7 @@
self.assertAddressReleasedWait(address)
self.cancelResourceCleanUp(rcuk_a)
- LOG.info("state: %s", instance.state)
+ LOG.debug("Instance %s state: %s", instance.id, instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
# TODO(afazekas): move steps from teardown to the test case
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
index 30785c4..1726beb 100755
--- a/tools/verify_tempest_config.py
+++ b/tools/verify_tempest_config.py
@@ -44,10 +44,11 @@
client_dict = {
'nova': os.servers_client,
'keystone': os.identity_client,
+ 'cinder': os.volumes_client,
}
client_dict[service].skip_path()
- endpoint_parts = urlparse.urlparse(client_dict[service])
- endpoint = endpoint_parts.scheme + '//' + endpoint_parts.netloc
+ endpoint_parts = urlparse.urlparse(client_dict[service].base_url)
+ endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
__, body = RAW_HTTP.request(endpoint, 'GET')
client_dict[service].reset_path()
body = json.loads(body)
@@ -76,6 +77,17 @@
not CONF.compute_feature_enabled.api_v3))
+def verify_cinder_api_versions(os):
+ # Check cinder api versions
+ versions = _get_api_versions(os, 'cinder')
+ if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+ print('Config option volume api_v1 should be changed to: %s' % (
+ not CONF.volume_feature_enabled.api_v1))
+ if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+ print('Config option volume api_v2 should be changed to: %s' % (
+ not CONF.volume_feature_enabled.api_v2))
+
+
def get_extension_client(os, service):
extensions_client = {
'nova': os.extensions_client,
@@ -169,10 +181,11 @@
'orchestration': 'heat',
'metering': 'ceilometer',
'telemetry': 'ceilometer',
- 'data_processing': 'savanna',
+ 'data_processing': 'sahara',
'baremetal': 'ironic',
- 'identity': 'keystone'
-
+ 'identity': 'keystone',
+ 'queuing': 'marconi',
+ 'database': 'trove'
}
# Get catalog list for endpoints to use for validation
__, endpoints = os.endpoints_client.list_endpoints()
@@ -219,6 +232,7 @@
verify_keystone_api_versions(os)
verify_glance_api_versions(os)
verify_nova_api_versions(os)
+ verify_cinder_api_versions(os)
display_results(results)