Merge "Refactor random url generation into its own method"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c45273e..25bc900 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,8 +1,3 @@
-.. Tempest documentation master file, created by
- sphinx-quickstart on Tue May 21 17:43:32 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
=======================
Tempest Testing Project
=======================
diff --git a/run_tempest.sh b/run_tempest.sh
index bdd1f69..5a9b742 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -58,7 +58,7 @@
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
- *) testrargs+="$testrargs $1";;
+ *) testrargs="$testrargs $1";;
esac
shift
done
diff --git a/tempest/README.rst b/tempest/README.rst
index dbac809..18c7cf3 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -62,13 +62,10 @@
stress
------
-Stress tests are designed to stress an OpenStack environment by
-running a high workload against it and seeing what breaks. Tools may
-be provided to help detect breaks (stack traces in the logs).
-
-TODO: old stress tests deleted, new_stress that david is working on
-moves into here.
-
+Stress tests are designed to stress an OpenStack environment by running a high
+workload against it and seeing what breaks. The stress test framework runs
+several test jobs in parallel and can run any existing test in Tempest as a
+stress job.
thirdparty
----------
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 6f7e438..62edd10 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -21,6 +21,14 @@
CONF = config.CONF
+# NOTE(adam_g): The baremetal API tests exercise operations such as enroll
+# node, power on, power off, etc. Testing against real drivers (e.g., IPMI)
+# will require passing driver-specific data to Tempest (addresses,
+# credentials, etc). Until then, only support testing against the fake driver,
+# which has no external dependencies.
+SUPPORTED_DRIVERS = ['fake']
+
+
def creates(resource):
"""Decorator that adds resources to the appropriate cleanup list."""
@@ -48,6 +56,13 @@
skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
raise cls.skipException(skip_msg)
+ if CONF.baremetal.driver not in SUPPORTED_DRIVERS:
+ skip_msg = ('%s skipped as Ironic driver %s is not supported for '
+ 'testing.' %
+ (cls.__name__, CONF.baremetal.driver))
+ raise cls.skipException(skip_msg)
+ cls.driver = CONF.baremetal.driver
+
mgr = clients.AdminManager()
cls.client = mgr.baremetal_client
cls.power_timeout = CONF.baremetal.power_timeout
@@ -85,7 +100,7 @@
@classmethod
@creates('node')
def create_node(cls, chassis_id, cpu_arch='x86', cpu_num=8, storage=1024,
- memory=4096, driver='fake'):
+ memory=4096):
"""
Wrapper utility for creating test baremetal nodes.
@@ -98,7 +113,7 @@
"""
resp, body = cls.client.create_node(chassis_id, cpu_arch=cpu_arch,
cpu_num=cpu_num, storage=storage,
- memory=memory, driver=driver)
+ memory=memory, driver=cls.driver)
return resp, body
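
For readers skimming this hunk, the sketch below is a minimal, self-contained illustration of the config-gated skip pattern introduced above; the FakeConf class, BaseBaremetalSketch name, and the 'ipmi' value are invented stand-ins, not Tempest's real CONF object or base classes:

    # Standalone sketch of skipping a test class when the configured driver
    # is not in the supported list (names here are illustrative only).
    import unittest

    SUPPORTED_DRIVERS = ['fake']


    class FakeConf(object):
        driver = 'ipmi'   # pretend this value came from tempest.conf


    class BaseBaremetalSketch(unittest.TestCase):

        @classmethod
        def setUpClass(cls):
            if FakeConf.driver not in SUPPORTED_DRIVERS:
                raise unittest.SkipTest(
                    '%s skipped as driver %s is not supported for testing'
                    % (cls.__name__, FakeConf.driver))
            # Tests now read the driver from config instead of hard-coding
            # 'fake' when creating nodes.
            cls.driver = FakeConf.driver

        def test_driver_is_supported(self):
            self.assertIn(self.driver, SUPPORTED_DRIVERS)


    if __name__ == '__main__':
        unittest.main()
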
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 48f9ffb..85b26a1 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -86,8 +86,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- has_valid_uptime = False
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
for hyper in hypers:
+            resp, details = (self.client.
+                             get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ has_valid_uptime = False
+ for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
diff --git a/tempest/api/compute/v3/admin/test_hypervisor.py b/tempest/api/compute/v3/admin/test_hypervisor.py
index f3397a8..9a23789 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor.py
@@ -83,7 +83,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- resp, uptime = self.client.get_hypervisor_uptime(hypers[0]['id'])
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
+ for hyper in hypers:
+            resp, details = (self.client.
+                             get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ resp, uptime = self.client.get_hypervisor_uptime(
+ hypers_without_ironic[0]['id'])
self.assertEqual(200, resp.status)
self.assertTrue(len(uptime) > 0)
diff --git a/tempest/api/orchestration/stacks/test_soft_conf.py b/tempest/api/orchestration/stacks/test_soft_conf.py
new file mode 100644
index 0000000..8903d4c
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_soft_conf.py
@@ -0,0 +1,163 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class TestSoftwareConfig(base.BaseOrchestrationTest):
+
+ def setUp(self):
+ super(TestSoftwareConfig, self).setUp()
+ self.configs = []
+ # Add 2 sets of software configuration
+ self.configs.append(self._config_create('a'))
+ self.configs.append(self._config_create('b'))
+ # Create a deployment using config a's id
+ self._deployment_create(self.configs[0]['id'])
+
+ def _config_create(self, suffix):
+ configuration = {'group': 'script',
+ 'inputs': [],
+ 'outputs': [],
+ 'options': {}}
+ configuration['name'] = 'heat_soft_config_%s' % suffix
+ configuration['config'] = '#!/bin/bash echo init-%s' % suffix
+ api_config = self.client.create_software_config(**configuration)
+ configuration['id'] = api_config['software_config']['id']
+ self.addCleanup(self._config_delete, configuration['id'])
+ self._validate_config(configuration, api_config)
+ return configuration
+
+ def _validate_config(self, configuration, api_config):
+ # Assert all expected keys are present with matching data
+ for k in configuration.keys():
+ self.assertEqual(configuration[k],
+ api_config['software_config'][k])
+
+ def _deployment_create(self, config_id):
+ self.server_id = data_utils.rand_name('dummy-server')
+ self.action = 'ACTION_0'
+ self.status = 'STATUS_0'
+ self.input_values = {}
+ self.output_values = []
+ self.status_reason = 'REASON_0'
+ self.signal_transport = 'NO_SIGNAL'
+ self.deployment = self.client.create_software_deploy(
+ self.server_id, config_id, self.action, self.status,
+ self.input_values, self.output_values, self.status_reason,
+ self.signal_transport)
+ self.deployment_id = self.deployment['software_deployment']['id']
+ self.addCleanup(self._deployment_delete, self.deployment_id)
+
+ def _deployment_delete(self, deploy_id):
+ self.client.delete_software_deploy(deploy_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_deploy,
+ self.deployment_id)
+
+ def _config_delete(self, config_id):
+ self.client.delete_software_config(config_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_config, config_id)
+
+ @test.attr(type='smoke')
+ def test_get_software_config(self):
+ """Testing software config get."""
+ for conf in self.configs:
+ api_config = self.client.get_software_config(conf['id'])
+ self._validate_config(conf, api_config)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_list(self):
+ """Getting a list of all deployments"""
+ deploy_list = self.client.get_software_deploy_list()
+ deploy_ids = [deploy['id'] for deploy in
+ deploy_list['software_deployments']]
+ self.assertIn(self.deployment_id, deploy_ids)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_metadata(self):
+ """Testing deployment metadata get"""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ conf_ids = [conf['id'] for conf in metadata['metadata']]
+ self.assertIn(self.configs[0]['id'], conf_ids)
+
+ def _validate_deployment(self, action, status, reason, config_id):
+ deployment = self.client.get_software_deploy(self.deployment_id)
+ self.assertEqual(action, deployment['software_deployment']['action'])
+ self.assertEqual(status, deployment['software_deployment']['status'])
+ self.assertEqual(reason,
+ deployment['software_deployment']['status_reason'])
+ self.assertEqual(config_id,
+ deployment['software_deployment']['config_id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_create_validate(self):
+ """Testing software deployment was created as expected."""
+ # Asserting that all fields were created
+ self.assert_fields_in_dict(
+ self.deployment['software_deployment'], 'action', 'config_id',
+ 'id', 'input_values', 'output_values', 'server_id', 'status',
+ 'status_reason')
+ # Testing get for this deployment and verifying parameters
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[0]['id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_no_metadata_change(self):
+ """Testing software deployment update without metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ # Updating values without changing the configuration ID
+ new_action = 'ACTION_1'
+ new_status = 'STATUS_1'
+ new_reason = 'REASON_1'
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[0]['id'],
+ new_action, new_status, self.input_values, self.output_values,
+ new_reason, self.signal_transport)
+ # Verifying get and that the deployment was updated as expected
+ self._validate_deployment(new_action, new_status,
+ new_reason, self.configs[0]['id'])
+
+ # Metadata should not be changed at this point
+ test_metadata = self.client.get_software_deploy_meta(self.server_id)
+ for key in metadata['metadata'][0]:
+ self.assertEqual(
+ metadata['metadata'][0][key],
+ test_metadata['metadata'][0][key])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_with_metadata_change(self):
+ """Testing software deployment update with metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[1]['id'],
+ self.action, self.status, self.input_values,
+ self.output_values, self.status_reason, self.signal_transport)
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[1]['id'])
+ # Metadata should now be changed
+ new_metadata = self.client.get_software_deploy_meta(self.server_id)
+        # It's enough to test the ID in this case
+ meta_id = metadata['metadata'][0]['id']
+ test_id = new_metadata['metadata'][0]['id']
+ self.assertNotEqual(meta_id, test_id)
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index ad0aa29..a16e425 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -48,50 +48,52 @@
}
}
+common_show_server = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'image': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'flavor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'user_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'links': parameter_types.links,
+ 'addresses': parameter_types.addresses,
+ },
+ # NOTE(GMann): 'progress' attribute is present in the response
+ # only when server's status is one of the progress statuses
+ # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
+ # So it is not defined as 'required'.
+ 'required': ['id', 'name', 'status', 'image', 'flavor',
+ 'user_id', 'tenant_id', 'created', 'updated',
+ 'metadata', 'links', 'addresses']
+}
+
base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'server': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'name': {'type': 'string'},
- 'status': {'type': 'string'},
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'flavor': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'user_id': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'created': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'links': parameter_types.links,
- 'addresses': parameter_types.addresses,
- },
- # NOTE(GMann): 'progress' attribute is present in the response
- # only when server's status is one of the progress statuses
- # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
- # So it is not defined as 'required'.
- 'required': ['id', 'name', 'status', 'image', 'flavor',
- 'user_id', 'tenant_id', 'created', 'updated',
- 'metadata', 'links', 'addresses']
- }
+ 'server': common_show_server
},
'required': ['server']
}
@@ -179,3 +181,40 @@
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message']
}
+
+instance_action_events = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'event': {'type': 'string'},
+ 'start_time': {'type': 'string'},
+ 'finish_time': {'type': 'string'},
+ 'result': {'type': 'string'},
+ 'traceback': {'type': ['string', 'null']}
+ },
+ 'required': ['event', 'start_time', 'finish_time', 'result',
+ 'traceback']
+ }
+}
+
+common_get_instance_action = copy.deepcopy(common_instance_actions)
+
+common_get_instance_action['properties'].update({
+ 'events': instance_action_events})
+# 'events' is not always present in the response body, so it is
+# not defined as 'required'.
+
+base_list_servers_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'servers': {
+ 'type': 'array',
+ 'items': common_show_server
+ }
+ },
+ 'required': ['servers']
+ }
+}
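
As a standalone illustration of the schema-sharing pattern this hunk introduces (a common item schema embedded in both a single-object and a list response, then extended per API version via copy.deepcopy), the sketch below uses the jsonschema package with invented field names and sample bodies; it mirrors the technique, not Tempest's actual schemas:

    # Standalone sketch of reusing one sub-schema across response schemas,
    # in the spirit of common_show_server / base_list_servers_detail above.
    # Requires the 'jsonschema' package; bodies and field names are invented.
    import copy

    import jsonschema

    common_item = {
        'type': 'object',
        'properties': {
            'id': {'type': 'string'},
            'name': {'type': 'string'},
        },
        'required': ['id', 'name'],
    }

    get_one = {
        'type': 'object',
        'properties': {'server': common_item},
        'required': ['server'],
    }

    list_detail = {
        'type': 'object',
        'properties': {'servers': {'type': 'array', 'items': common_item}},
        'required': ['servers'],
    }

    # A version-specific schema extends the shared item without mutating it.
    v2_item = copy.deepcopy(common_item)
    v2_item['properties'].update({'hostId': {'type': 'string'}})
    v2_item['required'].append('hostId')

    jsonschema.validate({'server': {'id': '1', 'name': 'vm-a'}}, get_one)
    jsonschema.validate({'servers': [{'id': '1', 'name': 'vm-a'}]}, list_detail)
    jsonschema.validate({'id': '1', 'name': 'vm-a', 'hostId': 'h1'}, v2_item)
    print('all sample bodies validate')
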
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index dc4054c..95c5760 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -240,3 +240,33 @@
'required': ['instanceActions']
}
}
+
+get_instance_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_instance_actions_object[
+ 'properties'].update({'instance_uuid': {'type': 'string'}})
+get_instance_actions_object['required'].extend(['instance_uuid'])
+
+get_instance_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'instanceAction': get_instance_actions_object
+ },
+ 'required': ['instanceAction']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'hostId': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): OS-DCF:diskConfig and accessIPv4/v6 are API
+# extensions, and some environments return a response
+# without these attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('hostId')
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 1af951f..541d3ff 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -26,7 +26,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
@@ -74,7 +74,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 3b50516..dc800cd 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -151,3 +151,33 @@
'required': ['server_actions']
}
}
+
+get_server_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_server_actions_object[
+ 'properties'].update({'server_uuid': {'type': 'string'}})
+get_server_actions_object['required'].extend(['server_uuid'])
+
+get_server_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server_action': get_server_actions_object
+ },
+ 'required': ['server_action']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'addresses': addresses_v3,
+ 'host_id': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): os-access-ips:access_ip_v4/v6 are API extensions,
+# and some environments return a response without these
+# attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('host_id')
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 19e816b..0b72b1c 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -169,7 +169,7 @@
def collect_users(users):
global USERS
- LOG.info("Creating users")
+ LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
@@ -202,6 +202,7 @@
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
+        LOG.info("Checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
@@ -217,6 +218,7 @@
def check_objects(self):
"""Check that the objects created are still there."""
+        LOG.info("Checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
@@ -226,6 +228,7 @@
def check_servers(self):
"""Check that the servers are still up and running."""
+        LOG.info("Checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
@@ -242,6 +245,7 @@
def check_volumes(self):
"""Check that the volumes are still there and attached."""
+        LOG.info("Checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
found = _get_volume_by_name(client, volume['name'])
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index d8474a0..d242c14 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -22,16 +22,6 @@
LOG = logging.getLogger(__name__)
-def _console_dump(client, server_id):
- try:
- resp, output = client.get_console_output(server_id, None)
- LOG.debug("Console Output for Server %s:\n%s" % (
- server_id, output))
- except exceptions.NotFound:
- LOG.debug("Server %s: doesn't have a console" % server_id)
- pass
-
-
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
@@ -81,10 +71,12 @@
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
-
if (server_status == 'ERROR') and raise_on_error:
- _console_dump(client, server_id)
- raise exceptions.BuildErrorException(server_id=server_id)
+ if 'fault' in body:
+ raise exceptions.BuildErrorException(body['fault'],
+ server_id=server_id)
+ else:
+ raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -99,11 +91,9 @@
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
-
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
- _console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
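
The waiter now surfaces the server's 'fault' detail (when the GET response includes one) directly in the raised exception instead of dumping the console log. A minimal standalone sketch of that error-reporting pattern follows; BuildError and the sample response body are invented stand-ins for Tempest's BuildErrorException and the real server body:

    # Standalone sketch of attaching the API-reported fault to the raised
    # error; the class and sample body are stand-ins, not Tempest's own.
    class BuildError(Exception):
        def __init__(self, details=None, server_id=None):
            msg = 'Server %s failed to build' % server_id
            if details:
                msg += ': %s' % details
            super(BuildError, self).__init__(msg)


    def raise_build_error(body, server_id):
        if 'fault' in body:
            raise BuildError(body['fault'], server_id=server_id)
        raise BuildError(server_id=server_id)


    try:
        raise_build_error({'status': 'ERROR',
                           'fault': {'message': 'No valid host was found'}},
                          server_id='abc123')
    except BuildError as exc:
        print(exc)   # the fault detail replaces the old console dump
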
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 800b3b0..8191984 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -141,7 +141,7 @@
server_or_ip=ip,
private_key=private_key)
- # Write a backend's responce into a file
+ # Write a backend's response into a file
resp = """echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n""" \
"""Connection: close\r\nContent-Type: text/html; """ \
"""charset=UTF-8\r\n\r\n%s"; cat >/dev/null"""
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 69d2f35..80bb711 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -164,6 +164,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -473,6 +474,7 @@
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_instance_action, resp, body)
return resp, body['instanceAction']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index d933998..a5b31d3 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -166,6 +166,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -470,6 +471,7 @@
resp, body = self.get("servers/%s/os-server-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_server_action, resp, body)
return resp, body['server_action']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index c459f28..d325eb5 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -259,3 +259,140 @@
'parameters': parameters,
}
return self._validate_template(post_body)
+
+ def create_software_config(self, name=None, config=None, group=None,
+ inputs=None, outputs=None, options=None):
+ headers, body = self._prep_software_config_create(
+ name, config, group, inputs, outputs, options)
+
+ url = 'software_configs'
+ resp, body = self.post(url, headers=headers, body=body)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def get_software_config(self, conf_id):
+ """Returns a software configuration resource."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, body = self.get(url)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def delete_software_config(self, conf_id):
+ """Deletes a specific software configuration."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp)
+
+ def create_software_deploy(self, server_id=None, config_id=None,
+ action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ None, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments'
+ resp, body = self.post(url, headers=headers, body=body)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def update_software_deploy(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ deploy_id, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.put(url, headers=headers, body=body)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy_list(self):
+ """Returns a list of all deployments."""
+ url = 'software_deployments'
+ resp, body = self.get(url)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy(self, deploy_id):
+ """Returns a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.get(url)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+    def get_software_deploy_meta(self, server_id):
+        """Returns config metadata for a specific server."""
+ url = 'software_deployments/metadata/%s' % server_id
+ resp, body = self.get(url)
+ self.expected_success(200, resp)
+ body = json.loads(body)
+ return body
+
+ def delete_software_deploy(self, deploy_id):
+ """Deletes a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp)
+
+ def _prep_software_config_create(self, name=None, conf=None, group=None,
+ inputs=None, outputs=None, options=None):
+ """Prepares a software configuration body."""
+ post_body = {}
+ if name is not None:
+ post_body["name"] = name
+ if conf is not None:
+ post_body["config"] = conf
+ if group is not None:
+ post_body["group"] = group
+ if inputs is not None:
+ post_body["inputs"] = inputs
+ if outputs is not None:
+ post_body["outputs"] = outputs
+ if options is not None:
+ post_body["options"] = options
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
+
+ def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None,
+ signal_transport=None):
+ """Prepares a deployment create or update (if an id was given)."""
+ post_body = {}
+
+ if deploy_id is not None:
+ post_body["id"] = deploy_id
+ if server_id is not None:
+ post_body["server_id"] = server_id
+ if config_id is not None:
+ post_body["config_id"] = config_id
+ if action is not None:
+ post_body["action"] = action
+ if status is not None:
+ post_body["status"] = status
+ if input_values is not None:
+ post_body["input_values"] = input_values
+ if output_values is not None:
+ post_body["output_values"] = output_values
+ if status_reason is not None:
+ post_body["status_reason"] = status_reason
+ if signal_transport is not None:
+ post_body["signal_transport"] = signal_transport
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
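
The _prep_* helpers above build request bodies by including only the fields the caller actually supplied. A compact standalone sketch of that pattern follows (generic function name, invented sample values; not part of the client class):

    # Standalone sketch of the "only include supplied fields" body builder
    # used by _prep_software_config_create / _prep_software_deploy_update.
    import json


    def build_body(**kwargs):
        # Drop keys whose value is None so the service only receives the
        # fields that were explicitly set by the caller.
        post_body = {k: v for k, v in kwargs.items() if v is not None}
        return json.dumps(post_body)


    print(build_body(name='heat_soft_config_a', group='script',
                     config='echo init-a',
                     inputs=None, outputs=None, options=None))
    # -> {"name": "heat_soft_config_a", "group": "script",
    #     "config": "echo init-a"}
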
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index c77faca..a7af619 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -102,7 +102,7 @@
}
}
- unkown_type_schema = {
+ unknown_type_schema = {
"type": "not_defined"
}
@@ -131,7 +131,7 @@
def test_generate_with_unknown_type(self):
self.assertRaises(TypeError, self.generator.generate,
- self.unkown_type_schema)
+ self.unknown_type_schema)
class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 7713931..2c68d6b 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -177,7 +177,10 @@
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
- self.assertEqual(len(tags), 0, str(tags))
+
+    # NOTE: Volume attach and detach cause metadata (tags) to be created
+    # for the volume, so exclude those tags while asserting.
+ self.assertNotIn('key1', tags)
for instance in reservation.instances:
instance.stop()
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
index c6f8eab..8ad59bb 100755
--- a/tools/subunit-trace.py
+++ b/tools/subunit-trace.py
@@ -263,7 +263,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
- 'debug infomation in realtime')
+ 'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
'information after the stream is proccesed')