Merge "Create test images via Glance instead of Nova"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c45273e..25bc900 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,8 +1,3 @@
-.. Tempest documentation master file, created by
- sphinx-quickstart on Tue May 21 17:43:32 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
=======================
Tempest Testing Project
=======================
diff --git a/requirements.txt b/requirements.txt
index ab2903a..9a3b74d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,11 +7,12 @@
boto>=2.12.0,!=2.13.0
paramiko>=1.13.0
netaddr>=0.7.6
-python-glanceclient>=0.9.0
+python-ceilometerclient>=1.0.6
+python-glanceclient>=0.13.1
python-keystoneclient>=0.9.0
python-novaclient>=2.17.0
-python-neutronclient>=2.3.4,<3
-python-cinderclient>=1.0.6
+python-neutronclient>=2.3.5,<3
+python-cinderclient>=1.0.7
python-heatclient>=0.2.9
python-ironicclient
python-saharaclient>=0.6.0
diff --git a/run_tempest.sh b/run_tempest.sh
index bdd1f69..5a9b742 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -58,7 +58,7 @@
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
- *) testrargs+="$testrargs $1";;
+ *) testrargs="$testrargs $1";;
esac
shift
done
diff --git a/tempest/README.rst b/tempest/README.rst
index dbac809..18c7cf3 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -62,13 +62,10 @@
stress
------
-Stress tests are designed to stress an OpenStack environment by
-running a high workload against it and seeing what breaks. Tools may
-be provided to help detect breaks (stack traces in the logs).
-
-TODO: old stress tests deleted, new_stress that david is working on
-moves into here.
-
+Stress tests are designed to stress an OpenStack environment by running a high
+workload against it and seeing what breaks. The stress test framework runs
+several test jobs in parallel and can run any existing test in Tempest as a
+stress job.
thirdparty
----------
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 6f7e438..62edd10 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -21,6 +21,14 @@
CONF = config.CONF
+# NOTE(adam_g): The baremetal API tests exercise operations such as enroll
+# node, power on, power off, etc. Testing against real drivers (i.e., IPMI)
+# will require passing driver-specific data to Tempest (addresses,
+# credentials, etc.). Until then, only support testing against the fake
+# driver, which has no external dependencies.
+SUPPORTED_DRIVERS = ['fake']
+
+
def creates(resource):
"""Decorator that adds resources to the appropriate cleanup list."""
@@ -48,6 +56,13 @@
skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
raise cls.skipException(skip_msg)
+ if CONF.baremetal.driver not in SUPPORTED_DRIVERS:
+ skip_msg = ('%s skipped as Ironic driver %s is not supported for '
+ 'testing.' %
+ (cls.__name__, CONF.baremetal.driver))
+ raise cls.skipException(skip_msg)
+ cls.driver = CONF.baremetal.driver
+
mgr = clients.AdminManager()
cls.client = mgr.baremetal_client
cls.power_timeout = CONF.baremetal.power_timeout
@@ -85,7 +100,7 @@
@classmethod
@creates('node')
def create_node(cls, chassis_id, cpu_arch='x86', cpu_num=8, storage=1024,
- memory=4096, driver='fake'):
+ memory=4096):
"""
Wrapper utility for creating test baremetal nodes.
@@ -98,7 +113,7 @@
"""
resp, body = cls.client.create_node(chassis_id, cpu_arch=cpu_arch,
cpu_num=cpu_num, storage=storage,
- memory=memory, driver=driver)
+ memory=memory, driver=cls.driver)
return resp, body
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 9555367..3a6de36 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -26,7 +26,7 @@
@classmethod
def setUpClass(cls):
super(AZAdminV3Test, cls).setUpClass()
- cls.client = cls.os_adm.availability_zone_client
+ cls.client = cls.availability_zone_admin_client
@test.attr(type='gate')
def test_get_availability_zone_list(self):
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 48f9ffb..85b26a1 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -86,8 +86,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- has_valid_uptime = False
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
for hyper in hypers:
+ resp, details = (self.client.
+                             get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ has_valid_uptime = False
+ for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index caf4174..70a9604 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -392,8 +392,11 @@
msg = ("Missing Compute Admin API credentials "
"in configuration.")
raise cls.skipException(msg)
+ if cls._api_version == 2:
+ cls.availability_zone_admin_client = (
+ cls.os_adm.availability_zone_client)
- if cls._api_version == 3:
+ else:
cls.servers_admin_client = cls.os_adm.servers_v3_client
cls.services_admin_client = cls.os_adm.services_v3_client
cls.availability_zone_admin_client = \
diff --git a/tempest/api/compute/v3/admin/test_hypervisor.py b/tempest/api/compute/v3/admin/test_hypervisor.py
index f3397a8..9a23789 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor.py
@@ -83,7 +83,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- resp, uptime = self.client.get_hypervisor_uptime(hypers[0]['id'])
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
+ for hyper in hypers:
+ resp, details = (self.client.
+                             get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ resp, uptime = self.client.get_hypervisor_uptime(
+ hypers_without_ironic[0]['id'])
self.assertEqual(200, resp.status)
self.assertTrue(len(uptime) > 0)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 1548f89..6beb8f2 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -40,7 +40,7 @@
cls.setup_endpoints = list()
for i in range(2):
region = data_utils.rand_name('region')
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
interface = 'public'
resp, endpoint = cls.client.create_endpoint(
cls.service_id, interface, url, region=region, enabled=True)
@@ -69,7 +69,7 @@
@test.attr(type='gate')
def test_create_list_delete_endpoint(self):
region = data_utils.rand_name('region')
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
interface = 'public'
resp, endpoint =\
self.client.create_endpoint(self.service_id, interface, url,
@@ -97,7 +97,7 @@
# Creating an endpoint so as to check update endpoint
# with new values
region1 = data_utils.rand_name('region')
- url1 = data_utils.rand_name('url')
+ url1 = data_utils.rand_url()
interface1 = 'public'
resp, endpoint_for_update =\
self.client.create_endpoint(self.service_id, interface1,
@@ -114,7 +114,7 @@
self.service_ids.append(service2['id'])
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
- url2 = data_utils.rand_name('url')
+ url2 = data_utils.rand_url()
interface2 = 'internal'
resp, endpoint = \
self.client.update_endpoint(endpoint_for_update['id'],
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 1d63cce..d728b1d 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -49,7 +49,7 @@
def test_create_with_enabled_False(self):
# Enabled should be a boolean, not a string like 'False'
interface = 'public'
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
region = data_utils.rand_name('region')
self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
self.service_id, interface, url, region=region,
@@ -59,7 +59,7 @@
def test_create_with_enabled_True(self):
# Enabled should be a boolean, not a string like 'True'
interface = 'public'
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
region = data_utils.rand_name('region')
self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
self.service_id, interface, url, region=region,
@@ -69,7 +69,7 @@
# Create an endpoint
region1 = data_utils.rand_name('region')
- url1 = data_utils.rand_name('url')
+ url1 = data_utils.rand_url()
interface1 = 'public'
resp, endpoint_for_update = (
self.client.create_endpoint(self.service_id, interface1,
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
new file mode 100644
index 0000000..497c5ea
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -0,0 +1,100 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ def _list_users_with_params(self, params, key, expected, not_expected):
+        # Helper method to list users filtered by params and to
+        # assert the response based on expected and not_expected
+        # expected: user expected to be present in the list response
+        # not_expected: user that should not be present in the list response
+ _, body = self.client.get_users(params)
+ self.assertIn(expected[key], map(lambda x: x[key], body))
+ self.assertNotIn(not_expected[key],
+ map(lambda x: x[key], body))
+
+ @classmethod
+ def setUpClass(cls):
+ super(UsersV3TestJSON, cls).setUpClass()
+ alt_user = data_utils.rand_name('test_user')
+ alt_password = data_utils.rand_name('pass')
+ cls.alt_email = alt_user + '@testmail.tm'
+ cls.data.setup_test_domain()
+        # Create a user with a domain
+ u1_name = data_utils.rand_name('test_user')
+ _, cls.domain_enabled_user = cls.client.create_user(
+ u1_name, password=alt_password,
+ email=cls.alt_email, domain_id=cls.data.domain['id'])
+ cls.data.v3_users.append(cls.domain_enabled_user)
+        # Create a default (non-domain) user that is not enabled
+ u2_name = data_utils.rand_name('test_user')
+ _, cls.non_domain_enabled_user = cls.client.create_user(
+ u2_name, password=alt_password,
+ email=cls.alt_email, enabled=False)
+ cls.data.v3_users.append(cls.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_user_domains(self):
+ # List users with domain
+ params = {'domain_id': self.data.domain['id']}
+ self._list_users_with_params(params, 'domain_id',
+ self.domain_enabled_user,
+ self.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users_with_not_enabled(self):
+        # List users that are not enabled
+ params = {'enabled': False}
+ self._list_users_with_params(params, 'enabled',
+ self.non_domain_enabled_user,
+ self.domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users_with_name(self):
+ # List users with name
+ params = {'name': self.domain_enabled_user['name']}
+ self._list_users_with_params(params, 'name',
+ self.domain_enabled_user,
+ self.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users(self):
+ # List users
+ _, body = self.client.get_users()
+ fetched_ids = [u['id'] for u in body]
+ missing_users = [u['id'] for u in self.data.v3_users
+ if u['id'] not in fetched_ids]
+ self.assertEqual(0, len(missing_users),
+ "Failed to find user %s in fetched list" %
+ ', '.join(m_user for m_user in missing_users))
+
+ @test.attr(type='gate')
+ def test_get_user(self):
+ # Get a user detail
+ _, user = self.client.get_user(self.data.v3_users[0]['id'])
+ self.assertEqual(self.data.v3_users[0]['id'], user['id'])
+ self.assertEqual(self.data.v3_users[0]['name'], user['name'])
+ self.assertEqual(self.alt_email, user['email'])
+ self.assertEqual(self.data.domain['id'], user['domain_id'])
+
+
+class UsersV3TestXML(UsersV3TestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 0991576..8eb7d33 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -121,6 +121,7 @@
self.v3_users = []
self.projects = []
self.v3_roles = []
+ self.domains = []
@property
def test_credentials(self):
@@ -185,6 +186,15 @@
_, self.v3_role = self.client.create_role(self.test_role)
self.v3_roles.append(self.v3_role)
+ def setup_test_domain(self):
+ """Set up a test domain."""
+ self.test_domain = data_utils.rand_name('test_domain')
+ self.test_description = data_utils.rand_name('desc')
+ _, self.domain = self.client.create_domain(
+ name=self.test_domain,
+ description=self.test_description)
+ self.domains.append(self.domain)
+
def teardown_all(self):
for user in self.users:
self.client.delete_user(user['id'])
@@ -198,3 +208,6 @@
self.client.delete_project(v3_project['id'])
for v3_role in self.v3_roles:
self.client.delete_role(v3_role['id'])
+ for domain in self.domains:
+ self.client.update_domain(domain['id'], enabled=False)
+ self.client.delete_domain(domain['id'])
diff --git a/tempest/api/orchestration/stacks/test_soft_conf.py b/tempest/api/orchestration/stacks/test_soft_conf.py
new file mode 100644
index 0000000..8903d4c
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_soft_conf.py
@@ -0,0 +1,163 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class TestSoftwareConfig(base.BaseOrchestrationTest):
+
+ def setUp(self):
+ super(TestSoftwareConfig, self).setUp()
+ self.configs = []
+ # Add 2 sets of software configuration
+ self.configs.append(self._config_create('a'))
+ self.configs.append(self._config_create('b'))
+ # Create a deployment using config a's id
+ self._deployment_create(self.configs[0]['id'])
+
+ def _config_create(self, suffix):
+ configuration = {'group': 'script',
+ 'inputs': [],
+ 'outputs': [],
+ 'options': {}}
+ configuration['name'] = 'heat_soft_config_%s' % suffix
+ configuration['config'] = '#!/bin/bash echo init-%s' % suffix
+ api_config = self.client.create_software_config(**configuration)
+ configuration['id'] = api_config['software_config']['id']
+ self.addCleanup(self._config_delete, configuration['id'])
+ self._validate_config(configuration, api_config)
+ return configuration
+
+ def _validate_config(self, configuration, api_config):
+ # Assert all expected keys are present with matching data
+ for k in configuration.keys():
+ self.assertEqual(configuration[k],
+ api_config['software_config'][k])
+
+ def _deployment_create(self, config_id):
+ self.server_id = data_utils.rand_name('dummy-server')
+ self.action = 'ACTION_0'
+ self.status = 'STATUS_0'
+ self.input_values = {}
+ self.output_values = []
+ self.status_reason = 'REASON_0'
+ self.signal_transport = 'NO_SIGNAL'
+ self.deployment = self.client.create_software_deploy(
+ self.server_id, config_id, self.action, self.status,
+ self.input_values, self.output_values, self.status_reason,
+ self.signal_transport)
+ self.deployment_id = self.deployment['software_deployment']['id']
+ self.addCleanup(self._deployment_delete, self.deployment_id)
+
+ def _deployment_delete(self, deploy_id):
+ self.client.delete_software_deploy(deploy_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_deploy,
+ self.deployment_id)
+
+ def _config_delete(self, config_id):
+ self.client.delete_software_config(config_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_config, config_id)
+
+ @test.attr(type='smoke')
+ def test_get_software_config(self):
+ """Testing software config get."""
+ for conf in self.configs:
+ api_config = self.client.get_software_config(conf['id'])
+ self._validate_config(conf, api_config)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_list(self):
+ """Getting a list of all deployments"""
+ deploy_list = self.client.get_software_deploy_list()
+ deploy_ids = [deploy['id'] for deploy in
+ deploy_list['software_deployments']]
+ self.assertIn(self.deployment_id, deploy_ids)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_metadata(self):
+ """Testing deployment metadata get"""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ conf_ids = [conf['id'] for conf in metadata['metadata']]
+ self.assertIn(self.configs[0]['id'], conf_ids)
+
+ def _validate_deployment(self, action, status, reason, config_id):
+ deployment = self.client.get_software_deploy(self.deployment_id)
+ self.assertEqual(action, deployment['software_deployment']['action'])
+ self.assertEqual(status, deployment['software_deployment']['status'])
+ self.assertEqual(reason,
+ deployment['software_deployment']['status_reason'])
+ self.assertEqual(config_id,
+ deployment['software_deployment']['config_id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_create_validate(self):
+ """Testing software deployment was created as expected."""
+ # Asserting that all fields were created
+ self.assert_fields_in_dict(
+ self.deployment['software_deployment'], 'action', 'config_id',
+ 'id', 'input_values', 'output_values', 'server_id', 'status',
+ 'status_reason')
+ # Testing get for this deployment and verifying parameters
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[0]['id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_no_metadata_change(self):
+ """Testing software deployment update without metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ # Updating values without changing the configuration ID
+ new_action = 'ACTION_1'
+ new_status = 'STATUS_1'
+ new_reason = 'REASON_1'
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[0]['id'],
+ new_action, new_status, self.input_values, self.output_values,
+ new_reason, self.signal_transport)
+ # Verifying get and that the deployment was updated as expected
+ self._validate_deployment(new_action, new_status,
+ new_reason, self.configs[0]['id'])
+
+ # Metadata should not be changed at this point
+ test_metadata = self.client.get_software_deploy_meta(self.server_id)
+ for key in metadata['metadata'][0]:
+ self.assertEqual(
+ metadata['metadata'][0][key],
+ test_metadata['metadata'][0][key])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_with_metadata_change(self):
+ """Testing software deployment update with metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[1]['id'],
+ self.action, self.status, self.input_values,
+ self.output_values, self.status_reason, self.signal_transport)
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[1]['id'])
+ # Metadata should now be changed
+ new_metadata = self.client.get_software_deploy_meta(self.server_id)
+        # It's enough to test the ID in this case
+ meta_id = metadata['metadata'][0]['id']
+ test_id = new_metadata['metadata'][0]['id']
+ self.assertNotEqual(meta_id, test_id)
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index ad0aa29..a16e425 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -48,50 +48,52 @@
}
}
+common_show_server = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'image': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'flavor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'user_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'links': parameter_types.links,
+ 'addresses': parameter_types.addresses,
+ },
+ # NOTE(GMann): 'progress' attribute is present in the response
+ # only when server's status is one of the progress statuses
+ # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
+ # So it is not defined as 'required'.
+ 'required': ['id', 'name', 'status', 'image', 'flavor',
+ 'user_id', 'tenant_id', 'created', 'updated',
+ 'metadata', 'links', 'addresses']
+}
+
base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'server': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'name': {'type': 'string'},
- 'status': {'type': 'string'},
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'flavor': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'user_id': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'created': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'links': parameter_types.links,
- 'addresses': parameter_types.addresses,
- },
- # NOTE(GMann): 'progress' attribute is present in the response
- # only when server's status is one of the progress statuses
- # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
- # So it is not defined as 'required'.
- 'required': ['id', 'name', 'status', 'image', 'flavor',
- 'user_id', 'tenant_id', 'created', 'updated',
- 'metadata', 'links', 'addresses']
- }
+ 'server': common_show_server
},
'required': ['server']
}
@@ -179,3 +181,40 @@
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message']
}
+
+instance_action_events = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'event': {'type': 'string'},
+ 'start_time': {'type': 'string'},
+ 'finish_time': {'type': 'string'},
+ 'result': {'type': 'string'},
+ 'traceback': {'type': ['string', 'null']}
+ },
+ 'required': ['event', 'start_time', 'finish_time', 'result',
+ 'traceback']
+ }
+}
+
+common_get_instance_action = copy.deepcopy(common_instance_actions)
+
+common_get_instance_action['properties'].update({
+ 'events': instance_action_events})
+# 'events' is not always present in the response body, so it is not
+# defined as 'required'
+
+base_list_servers_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'servers': {
+ 'type': 'array',
+ 'items': common_show_server
+ }
+ },
+ 'required': ['servers']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index dc4054c..95c5760 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -240,3 +240,33 @@
'required': ['instanceActions']
}
}
+
+get_instance_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_instance_actions_object[
+ 'properties'].update({'instance_uuid': {'type': 'string'}})
+get_instance_actions_object['required'].extend(['instance_uuid'])
+
+get_instance_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'instanceAction': get_instance_actions_object
+ },
+ 'required': ['instanceAction']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'hostId': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): OS-DCF:diskConfig and accessIPv4/v6 are API
+# extensions, and some environments return a response
+# without these attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('hostId')
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 1af951f..541d3ff 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -26,7 +26,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
@@ -74,7 +74,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 3b50516..dc800cd 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -151,3 +151,33 @@
'required': ['server_actions']
}
}
+
+get_server_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_server_actions_object[
+ 'properties'].update({'server_uuid': {'type': 'string'}})
+get_server_actions_object['required'].extend(['server_uuid'])
+
+get_server_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server_action': get_server_actions_object
+ },
+ 'required': ['server_action']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'addresses': addresses_v3,
+ 'host_id': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): os-access-ips:access_ip_v4/v6 are API extensions,
+# and some environments return a response without these
+# attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('host_id')
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 0571f4f..ba94c82 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -95,14 +95,15 @@
return self.cmd_with_auth(
'neutron', action, flags, params, admin, fail_ok)
- def sahara(self, action, flags='', params='', admin=True, fail_ok=False):
+ def sahara(self, action, flags='', params='', admin=True,
+ fail_ok=False, merge_stderr=True):
"""Executes sahara command for the given action."""
flags += ' --endpoint-type %s' % CONF.data_processing.endpoint_type
return self.cmd_with_auth(
- 'sahara', action, flags, params, admin, fail_ok)
+ 'sahara', action, flags, params, admin, fail_ok, merge_stderr)
def cmd_with_auth(self, cmd, action, flags='', params='',
- admin=True, fail_ok=False):
+ admin=True, fail_ok=False, merge_stderr=False):
"""Executes given command with auth attributes appended."""
# TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
@@ -112,7 +113,7 @@
CONF.identity.admin_password,
CONF.identity.uri))
flags = creds + ' ' + flags
- return self.cmd(cmd, action, flags, params, fail_ok)
+ return self.cmd(cmd, action, flags, params, fail_ok, merge_stderr)
def cmd(self, cmd, action, flags='', params='', fail_ok=False,
merge_stderr=False):
@@ -120,7 +121,7 @@
cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
- cmd = shlex.split(cmd)
+ cmd = shlex.split(cmd.encode('utf-8'))
result = ''
result_err = ''
stdout = subprocess.PIPE
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index 36cc324..f00dcae 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -12,8 +12,8 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
import logging
+import re
import subprocess
from tempest import cli
@@ -138,3 +138,30 @@
'cluster_id',
'status'
])
+
+ def test_sahara_bash_completion(self):
+ self.sahara('bash-completion')
+
+ # Optional arguments
+ def test_sahara_help(self):
+ help_text = self.sahara('help')
+ lines = help_text.split('\n')
+ self.assertFirstLineStartsWith(lines, 'usage: sahara')
+
+ commands = []
+ cmds_start = lines.index('Positional arguments:')
+ cmds_end = lines.index('Optional arguments:')
+ command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)')
+ for line in lines[cmds_start:cmds_end]:
+ match = command_pattern.match(line)
+ if match:
+ commands.append(match.group(1))
+ commands = set(commands)
+ wanted_commands = set(('cluster-create', 'data-source-create',
+ 'image-unregister', 'job-binary-create',
+ 'plugin-list', 'job-binary-create', 'help'))
+ self.assertFalse(wanted_commands - commands)
+
+ def test_sahara_version(self):
+ version = self.sahara('', flags='--version')
+ self.assertTrue(re.search('[0-9.]+', version))
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 19e816b..0b72b1c 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -169,7 +169,7 @@
def collect_users(users):
global USERS
- LOG.info("Creating users")
+ LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
@@ -202,6 +202,7 @@
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
+ LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
@@ -217,6 +218,7 @@
def check_objects(self):
"""Check that the objects created are still there."""
+ LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
@@ -226,6 +228,7 @@
def check_servers(self):
"""Check that the servers are still up and running."""
+ LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
@@ -242,6 +245,7 @@
def check_volumes(self):
"""Check that the volumes are still there and attached."""
+ LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
found = _get_volume_by_name(client, volume['name'])
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 2ab008d..6583475 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -25,7 +25,7 @@
def sudo_cmd_call(cmd):
- args = shlex.split(cmd)
+ args = shlex.split(cmd.encode('utf-8'))
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
@@ -84,7 +84,7 @@
"-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
'file1': file_from,
'dest': dest}
- args = shlex.split(cmd)
+ args = shlex.split(cmd.encode('utf-8'))
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
proc = subprocess.Popen(args, **subprocess_args)
diff --git a/tempest/common/utils/data_utils.py b/tempest/common/utils/data_utils.py
index a0a88dd..174e557 100644
--- a/tempest/common/utils/data_utils.py
+++ b/tempest/common/utils/data_utils.py
@@ -34,6 +34,11 @@
return randbits
+def rand_url():
+ randbits = str(random.randint(1, 0x7fffffff))
+ return 'https://url-' + randbits + '.com'
+
+
def rand_int_id(start=0, end=0x7fffffff):
return random.randint(start, end)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index d8474a0..d242c14 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -22,16 +22,6 @@
LOG = logging.getLogger(__name__)
-def _console_dump(client, server_id):
- try:
- resp, output = client.get_console_output(server_id, None)
- LOG.debug("Console Output for Server %s:\n%s" % (
- server_id, output))
- except exceptions.NotFound:
- LOG.debug("Server %s: doesn't have a console" % server_id)
- pass
-
-
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
@@ -81,10 +71,12 @@
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
-
if (server_status == 'ERROR') and raise_on_error:
- _console_dump(client, server_id)
- raise exceptions.BuildErrorException(server_id=server_id)
+ if 'fault' in body:
+ raise exceptions.BuildErrorException(body['fault'],
+ server_id=server_id)
+ else:
+ raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -99,11 +91,9 @@
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
-
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
- _console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 800b3b0..8191984 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -141,7 +141,7 @@
server_or_ip=ip,
private_key=private_key)
- # Write a backend's responce into a file
+ # Write a backend's response into a file
resp = """echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n""" \
"""Connection: close\r\nContent-Type: text/html; """ \
"""charset=UTF-8\r\n\r\n%s"; cat >/dev/null"""
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 69d2f35..80bb711 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -164,6 +164,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -473,6 +474,7 @@
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_instance_action, resp, body)
return resp, body['instanceAction']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index d933998..a5b31d3 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -166,6 +166,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -470,6 +471,7 @@
resp, body = self.get("servers/%s/os-server-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_server_action, resp, body)
return resp, body['server_action']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 8c72dfa..329f026 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -14,6 +14,7 @@
# under the License.
import json
+import urllib
from tempest.common import rest_client
from tempest import config
@@ -83,9 +84,12 @@
body = json.loads(body)
return resp, body['projects']
- def get_users(self):
+ def get_users(self, params=None):
"""Get the list of users."""
- resp, body = self.get("users")
+ url = 'users'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['users']
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 242b032..3790f13 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -14,6 +14,7 @@
# under the License.
import json
+import urllib
from lxml import etree
@@ -76,6 +77,14 @@
array.append(common.xml_to_json(child))
return array
+ def _parse_users(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "user":
+ array.append(common.xml_to_json(child))
+ return array
+
def _parse_array(self, node):
array = []
for child in node.getchildren():
@@ -137,11 +146,14 @@
body = self._parse_projects(etree.fromstring(body))
return resp, body
- def get_users(self):
+ def get_users(self, params=None):
"""Get the list of users."""
- resp, body = self.get("users")
+ url = 'users'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
- body = self._parse_array(etree.fromstring(body))
+ body = self._parse_users(etree.fromstring(body))
return resp, body
def get_user(self, user_id):
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index c459f28..d325eb5 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -259,3 +259,140 @@
'parameters': parameters,
}
return self._validate_template(post_body)
+
+ def create_software_config(self, name=None, config=None, group=None,
+ inputs=None, outputs=None, options=None):
+ headers, body = self._prep_software_config_create(
+ name, config, group, inputs, outputs, options)
+
+ url = 'software_configs'
+ resp, body = self.post(url, headers=headers, body=body)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_config(self, conf_id):
+ """Returns a software configuration resource."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def delete_software_config(self, conf_id):
+ """Deletes a specific software configuration."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, _ = self.delete(url)
+        self.expected_success(204, resp.status)
+
+ def create_software_deploy(self, server_id=None, config_id=None,
+ action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ None, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments'
+ resp, body = self.post(url, headers=headers, body=body)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def update_software_deploy(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ deploy_id, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.put(url, headers=headers, body=body)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy_list(self):
+ """Returns a list of all deployments."""
+ url = 'software_deployments'
+ resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy(self, deploy_id):
+ """Returns a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy_meta(self, server_id):
+ """Return a config metadata for a specific server."""
+ url = 'software_deployments/metadata/%s' % server_id
+ resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def delete_software_deploy(self, deploy_id):
+ """Deletes a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, _ = self.delete(url)
+        self.expected_success(204, resp.status)
+
+ def _prep_software_config_create(self, name=None, conf=None, group=None,
+ inputs=None, outputs=None, options=None):
+ """Prepares a software configuration body."""
+ post_body = {}
+ if name is not None:
+ post_body["name"] = name
+ if conf is not None:
+ post_body["config"] = conf
+ if group is not None:
+ post_body["group"] = group
+ if inputs is not None:
+ post_body["inputs"] = inputs
+ if outputs is not None:
+ post_body["outputs"] = outputs
+ if options is not None:
+ post_body["options"] = options
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
+
+ def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None,
+ signal_transport=None):
+ """Prepares a deployment create or update (if an id was given)."""
+ post_body = {}
+
+ if deploy_id is not None:
+ post_body["id"] = deploy_id
+ if server_id is not None:
+ post_body["server_id"] = server_id
+ if config_id is not None:
+ post_body["config_id"] = config_id
+ if action is not None:
+ post_body["action"] = action
+ if status is not None:
+ post_body["status"] = status
+ if input_values is not None:
+ post_body["input_values"] = input_values
+ if output_values is not None:
+ post_body["output_values"] = output_values
+ if status_reason is not None:
+ post_body["status_reason"] = status_reason
+ if signal_transport is not None:
+ post_body["signal_transport"] = signal_transport
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index c77faca..a7af619 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -102,7 +102,7 @@
}
}
- unkown_type_schema = {
+ unknown_type_schema = {
"type": "not_defined"
}
@@ -131,7 +131,7 @@
def test_generate_with_unknown_type(self):
self.assertRaises(TypeError, self.generator.generate,
- self.unkown_type_schema)
+ self.unknown_type_schema)
class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index 5a334c5..3dc2199 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -32,7 +32,7 @@
cmd = ' '.join([cmd, param])
LOG.info("running: '%s'" % cmd)
cmd_str = cmd
- cmd = shlex.split(cmd)
+ cmd = shlex.split(cmd.encode('utf-8'))
result = ''
result_err = ''
try:
diff --git a/tempest/tests/test_waiters.py b/tempest/tests/test_waiters.py
index 1f9825e..a29cb46 100644
--- a/tempest/tests/test_waiters.py
+++ b/tempest/tests/test_waiters.py
@@ -15,6 +15,7 @@
import time
import mock
+import testtools
from tempest.common import waiters
from tempest import exceptions
@@ -47,3 +48,221 @@
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
+
+
+class TestServerWaiters(base.TestCase):
+ def setUp(self):
+ super(TestServerWaiters, self).setUp()
+ self.client = mock.MagicMock()
+ self.client.build_timeout = 1
+ self.client.build_interval = 1
+
+ def test_wait_for_server_status(self):
+ self.client.get_server.return_value = (None, {'status':
+ 'active'}
+ )
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'active'
+ )
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertTrue((end_time - start_time) < 2)
+
+ def test_wait_for_server_status_BUILD_from_not_UNKNOWN(self):
+ self.client.get_server.return_value = (None, {'status': 'active'})
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'BUILD')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertTrue((end_time - start_time) < 2)
+
+ def test_wait_for_server_status_ready_wait_with_BUILD(self):
+ self.client.get_server.return_value = (None, {'status': 'BUILD'})
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'BUILD', True)
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertTrue((end_time - start_time) < 2)
+
+ def test_wait_for_server_status_ready_wait(self):
+ self.client.get_server.return_value = (None, {'status':
+ 'ERROR',
+ 'OS-EXT-STS:task_state':
+ 'n/a'
+ }
+ )
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+ 'active status and task state n/a within the '
+ 'required time (1 s).\nCurrent status: SUSPENDED.'
+ '\nCurrent task state: None.'}
+ )
+ self.assertRaises(exceptions.BuildErrorException,
+ waiters.wait_for_server_status,
+ self.client, 'fake_svr_id', 'active',
+ ready_wait=True, extra_timeout=0,
+ raise_on_error=True
+ )
+
+ def test_wait_for_server_status_no_ready_wait(self):
+ self.client.get_server.return_value = (None, {'status':
+ 'ERROR',
+ 'OS-EXT-STS:task_state':
+ 'n/a'
+ }
+ )
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'ERROR', ready_wait=False,
+ extra_timeout=10, raise_on_error=True
+ )
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout + extra_timeout
+ self.assertTrue((end_time - start_time) < 12)
+
+ def test_wait_for_server_status_timeout(self):
+ self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+ 'active status and task state n/a within the '
+ 'required time (1 s).\nCurrent status: SUSPENDED.'
+ '\nCurrent task state: None.'}
+ )
+ self.assertRaises(exceptions.TimeoutException,
+ waiters.wait_for_server_status,
+ self.client, 'fake_svr_id', 'active')
+
+ def test_wait_for_server_status_extra_timeout(self):
+ self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
+ start_time = int(time.time())
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+ 'active status and task state n/a within the '
+ 'required time (10 s). \nCurrent status: SUSPENDED.'
+ '\nCurrent task state: None.'}
+ )
+ self.assertRaises(exceptions.TimeoutException,
+ waiters.wait_for_server_status,
+ self.client, 'fake_svr_id',
+ 'active', ready_wait=True,
+ extra_timeout=10, raise_on_error=True
+ )
+ end_time = int(time.time())
+ # Ensure waiter returns after build_timeout but
+ # before build_timeout+extra timeout
+ self.assertTrue(10 < (end_time - start_time) < 12)
+
+ def test_wait_for_server_status_error_on_server_create(self):
+ self.client.get_server.return_value = (None, {'status': 'ERROR'})
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+             'active status and task state n/a within the '
+ 'required time (1 s).\nCurrent status: ERROR.'
+ '\nCurrent task state: None.'}
+ )
+ self.assertRaises(exceptions.BuildErrorException,
+ waiters.wait_for_server_status,
+ self.client, 'fake_svr_id', 'active')
+
+ def test_wait_for_server_status_no_raise_on_error(self):
+ self.client.get_server.return_value = (None, {'status': 'ERROR'})
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+             'active status and task state n/a within the '
+ 'required time (1 s).\nCurrent status: ERROR.'
+ '\nCurrent task state: None.'}
+ )
+ self.assertRaises(exceptions.TimeoutException,
+ waiters.wait_for_server_status,
+ self.client, 'fake_svr_id', 'active',
+ ready_wait=True, extra_timeout=0,
+ raise_on_error=False
+ )
+
+ def test_wait_for_server_status_no_ready_wait_timeout(self):
+ self.client.get_server.return_value = (None, {'status': 'ERROR'})
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+ 'active status and task state n/a within the '
+ 'required time (11 s).\nCurrent status: ERROR.'
+ '\nCurrent task state: None.'}
+ )
+ expected_msg = '''Request timed out
+Details: (TestServerWaiters:test_wait_for_server_status_no_ready_wait_timeout)\
+ Server fake_svr_id failed to reach active status and task state "n/a" within\
+ the required time (11 s). Current status: ERROR. Current task state: None.\
+'''
+ with testtools.ExpectedException(exceptions.TimeoutException,
+ testtools.matchers.AfterPreprocessing(
+ str,
+ testtools.matchers.Equals(expected_msg)
+ )
+ ):
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'active', ready_wait=False,
+ extra_timeout=10,
+ raise_on_error=False
+ )
+
+ def test_wait_for_server_status_ready_wait_timeout(self):
+ self.client.get_server.return_value = (None, {'status': 'ERROR'})
+ self.client.get_console_output.return_value = (None,
+ {'output': 'Server fake_svr_id failed to reach '
+             'active status and task state n/a within the '
+ 'required time (11 s).\nCurrent status: ERROR.'
+ '\nCurrent task state: None.'}
+ )
+ expected_msg = '''Request timed out
+Details: (TestServerWaiters:test_wait_for_server_status_ready_wait_timeout)\
+ Server fake_svr_id failed to reach active status and task state "None" within\
+ the required time (11 s). Current status: ERROR. Current task state: None.\
+'''
+ with testtools.ExpectedException(exceptions.TimeoutException,
+ testtools.matchers.AfterPreprocessing(
+ str,
+ testtools.matchers.Equals(expected_msg)
+ )
+ ):
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'active', ready_wait=True,
+ extra_timeout=10,
+ raise_on_error=False
+ )
+
+ def test_wait_for_changing_server_status(self):
+ self.client.get_server.side_effect = [(None, {'status': 'BUILD'}),
+ (None, {'status': 'active'})]
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'active', ready_wait=True,
+ extra_timeout=10,
+ raise_on_error=True
+ )
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout + extra_timeout
+ self.assertTrue((end_time - start_time) < 12)
+
+ def test_wait_for_changing_server_task_status(self):
+ self.client.get_server.side_effect = [(None, {'status': 'BUILD',
+ 'OS-EXT-STS:task_state':
+ 'n/a'
+ }
+ ),
+ (None, {'status': 'active',
+ 'OS-EXT-STS:task_state':
+ 'None'
+ }
+ )
+ ]
+ start_time = int(time.time())
+ waiters.wait_for_server_status(self.client, 'fake_svr_id',
+ 'active', ready_wait=True,
+ extra_timeout=10,
+ raise_on_error=True
+ )
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout + extra_timeout
+ self.assertTrue((end_time - start_time) < 12)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 7713931..2c68d6b 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -177,7 +177,10 @@
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
- self.assertEqual(len(tags), 0, str(tags))
+
+        # NOTE: Volume attach and detach cause metadata (tags) to be created
+        # for the volume, so exclude them when asserting.
+ self.assertNotIn('key1', tags)
for instance in reservation.instances:
instance.stop()
diff --git a/test-requirements.txt b/test-requirements.txt
index 13ef291..cd8154b 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,5 @@
hacking>=0.9.2,<0.10
# needed for doc build
-docutils==0.9.1
sphinx>=1.1.2,!=1.2.0,<1.3
python-subunit>=0.0.18
oslosphinx
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
index c6f8eab..8ad59bb 100755
--- a/tools/subunit-trace.py
+++ b/tools/subunit-trace.py
@@ -263,7 +263,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
- 'debug infomation in realtime')
+ 'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
'information after the stream is proccesed')