Merge "Fix exception when api_extensions is set to empty"
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c45273e..25bc900 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,8 +1,3 @@
-.. Tempest documentation master file, created by
- sphinx-quickstart on Tue May 21 17:43:32 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
=======================
Tempest Testing Project
=======================
diff --git a/requirements.txt b/requirements.txt
index ab2903a..9a3b74d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,11 +7,12 @@
boto>=2.12.0,!=2.13.0
paramiko>=1.13.0
netaddr>=0.7.6
-python-glanceclient>=0.9.0
+python-ceilometerclient>=1.0.6
+python-glanceclient>=0.13.1
python-keystoneclient>=0.9.0
python-novaclient>=2.17.0
-python-neutronclient>=2.3.4,<3
-python-cinderclient>=1.0.6
+python-neutronclient>=2.3.5,<3
+python-cinderclient>=1.0.7
python-heatclient>=0.2.9
python-ironicclient
python-saharaclient>=0.6.0
diff --git a/run_tempest.sh b/run_tempest.sh
index bdd1f69..5a9b742 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -58,7 +58,7 @@
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
- *) testrargs+="$testrargs $1";;
+ *) testrargs="$testrargs $1";;
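+ # (the old "testrargs+=" together with the explicit "$testrargs"
+ # reference appended each argument twice)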
esac
shift
done
diff --git a/tempest/README.rst b/tempest/README.rst
index dbac809..fb25151 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -23,9 +23,8 @@
belongs in each directory, the rules and examples for good tests, are
documented in a README.rst file in the directory.
-
-api
----
+:ref:`api_field_guide`
+----------------------
API tests are validation tests for the OpenStack API. They should not
use the existing python clients for OpenStack, but should instead use
@@ -39,8 +38,8 @@
frameworks.
-cli
----
+:ref:`cli_field_guide`
+----------------------
CLI tests use the openstack CLI to interact with the OpenStack
cloud. CLI testing in unit tests is somewhat difficult because unlike
@@ -49,8 +48,8 @@
prereqs having a running OpenStack cloud.
-scenario
---------
+:ref:`scenario_field_guide`
+---------------------------
Scenario tests are complex "through path" tests for OpenStack
functionality. They are typically a series of steps where complicated
@@ -59,21 +58,26 @@
Scenario tests can and should use the OpenStack python clients.
-stress
-------
+:ref:`stress_field_guide`
+-------------------------
-Stress tests are designed to stress an OpenStack environment by
-running a high workload against it and seeing what breaks. Tools may
-be provided to help detect breaks (stack traces in the logs).
+Stress tests are designed to stress an OpenStack environment by running a high
+workload against it and seeing what breaks. The stress test framework runs
+several test jobs in parallel and can run any existing test in Tempest as a
+stress job.
-TODO: old stress tests deleted, new_stress that david is working on
-moves into here.
-
-
-thirdparty
-----------
+:ref:`third_party_field_guide`
+------------------------------
Many openstack components include 3rdparty API support. It is
completely legitimate for Tempest to include tests of 3rdparty APIs,
but those should be kept separate from the normal OpenStack
validation.
+
+:ref:`unit_tests_field_guide`
+-----------------------------
+
+Unit tests are the self-checks for Tempest. They provide functional
+verification and regression checking for the internal components of Tempest,
+and should be used just to verify that the individual pieces of Tempest are
+working as expected.
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 9eac19d..91e6ad6 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -1,3 +1,5 @@
+.. _api_field_guide:
+
Tempest Field Guide to API tests
================================
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 6f7e438..62edd10 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -21,6 +21,14 @@
CONF = config.CONF
+# NOTE(adam_g): The baremetal API tests exercise operations such as enroll
+# node, power on, power off, etc. Testing against real drivers (i.e., IPMI)
+# would require passing driver-specific data to Tempest (addresses,
+# credentials, etc.). Until that is supported, only test against the fake
+# driver, which has no external dependencies.
+SUPPORTED_DRIVERS = ['fake']
+
+
def creates(resource):
"""Decorator that adds resources to the appropriate cleanup list."""
@@ -48,6 +56,13 @@
skip_msg = ('%s skipped as Ironic is not available' % cls.__name__)
raise cls.skipException(skip_msg)
+ if CONF.baremetal.driver not in SUPPORTED_DRIVERS:
+ skip_msg = ('%s skipped as Ironic driver %s is not supported for '
+ 'testing.' %
+ (cls.__name__, CONF.baremetal.driver))
+ raise cls.skipException(skip_msg)
+ cls.driver = CONF.baremetal.driver
+
mgr = clients.AdminManager()
cls.client = mgr.baremetal_client
cls.power_timeout = CONF.baremetal.power_timeout
@@ -85,7 +100,7 @@
@classmethod
@creates('node')
def create_node(cls, chassis_id, cpu_arch='x86', cpu_num=8, storage=1024,
- memory=4096, driver='fake'):
+ memory=4096):
"""
Wrapper utility for creating test baremetal nodes.
@@ -98,7 +113,7 @@
"""
resp, body = cls.client.create_node(chassis_id, cpu_arch=cpu_arch,
cpu_num=cpu_num, storage=storage,
- memory=memory, driver=driver)
+ memory=memory, driver=cls.driver)
return resp, body
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 9555367..3a6de36 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -26,7 +26,7 @@
@classmethod
def setUpClass(cls):
super(AZAdminV3Test, cls).setUpClass()
- cls.client = cls.os_adm.availability_zone_client
+ cls.client = cls.availability_zone_admin_client
@test.attr(type='gate')
def test_get_availability_zone_list(self):
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 48f9ffb..85b26a1 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -86,8 +86,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- has_valid_uptime = False
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
for hyper in hypers:
+ resp, details = (self.client.
+ get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ has_valid_uptime = False
+ for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index cccaf13..f4d010e 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -127,6 +127,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index caf4174..70a9604 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -392,8 +392,11 @@
msg = ("Missing Compute Admin API credentials "
"in configuration.")
raise cls.skipException(msg)
+ if cls._api_version == 2:
+ cls.availability_zone_admin_client = (
+ cls.os_adm.availability_zone_client)
- if cls._api_version == 3:
+ else:
cls.servers_admin_client = cls.os_adm.servers_v3_client
cls.services_admin_client = cls.os_adm.services_v3_client
cls.availability_zone_admin_client = \
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 91eb4c5..9036726 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import StringIO
+
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
@@ -31,25 +33,21 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
+ cls.glance_client = cls.os.image_client
cls.client = cls.images_client
cls.image_id = None
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
-
- # Snapshot the server once to save time
name = data_utils.rand_name('image')
- resp, _ = cls.client.create_image(cls.server_id, name, {})
- cls.image_id = resp['location'].rsplit('/', 1)[1]
-
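+ # Create the test image directly via glance instead of snapshotting
+ # a server, so no instance needs to be booted.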
+ resp, body = cls.glance_client.create_image(name=name,
+ container_format='bare',
+ disk_format='raw',
+ is_public=False)
+ cls.image_id = body['id']
+ cls.images.append(cls.image_id)
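+ # Upload some data so the image can transition to ACTIVE.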
+ image_file = StringIO.StringIO(('*' * 1024))
+ cls.glance_client.update_image(cls.image_id, data=image_file)
cls.client.wait_for_image_status(cls.image_id, 'ACTIVE')
- @classmethod
- def tearDownClass(cls):
- if cls.image_id:
- cls.client.delete_image(cls.image_id)
- super(ImagesMetadataTestJSON, cls).tearDownClass()
-
def setUp(self):
super(ImagesMetadataTestJSON, self).setUp()
meta = {'key1': 'value1', 'key2': 'value2'}
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 86ee4a4..f9350e1 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -13,7 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import StringIO
+import time
+
from tempest.api.compute import base
+from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
@@ -32,7 +36,34 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
cls.client = cls.images_client
+ cls.glance_client = cls.os.image_client
+ def _create_image():
+ name = data_utils.rand_name('image')
+ _, body = cls.glance_client.create_image(name=name,
+ container_format='bare',
+ disk_format='raw',
+ is_public=False)
+ image_id = body['id']
+ cls.images.append(image_id)
+ # Wait 1 second between creation and upload to ensure a delta
+ # between created_at and updated_at.
+ time.sleep(1)
+ image_file = StringIO.StringIO(('*' * 1024))
+ cls.glance_client.update_image(image_id, data=image_file)
+ cls.client.wait_for_image_status(image_id, 'ACTIVE')
+ _, body = cls.client.get_image(image_id)
+ return body
+
+ # Create non-snapshot images via glance
+ cls.image1 = _create_image()
+ cls.image1_id = cls.image1['id']
+ cls.image2 = _create_image()
+ cls.image2_id = cls.image2['id']
+ cls.image3 = _create_image()
+ cls.image3_id = cls.image3['id']
+
+ # Create instances and snapshots via nova
try:
resp, cls.server1 = cls.create_test_server()
resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
@@ -41,21 +72,21 @@
'ACTIVE')
# Create images to be used in the filter tests
- resp, cls.image1 = cls.create_image_from_server(
+ resp, cls.snapshot1 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
- cls.image1_id = cls.image1['id']
+ cls.snapshot1_id = cls.snapshot1['id']
# Servers have a hidden property for when they are being imaged
# Performing back-to-back create image calls on a single
# server will sometimes cause failures
- resp, cls.image3 = cls.create_image_from_server(
+ resp, cls.snapshot3 = cls.create_image_from_server(
cls.server2['id'], wait_until='ACTIVE')
- cls.image3_id = cls.image3['id']
+ cls.snapshot3_id = cls.snapshot3['id']
# Wait for the server to be active after the image upload
- resp, cls.image2 = cls.create_image_from_server(
+ resp, cls.snapshot2 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
- cls.image2_id = cls.image2['id']
+ cls.snapshot2_id = cls.snapshot2['id']
except Exception:
LOG.exception('setUpClass failed')
cls.tearDownClass()
@@ -89,11 +120,14 @@
params = {'server': self.server1['id']}
resp, images = self.client.list_images(params)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]),
- "Failed to find image %s in images. Got images %s" %
- (self.image1_id, images))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]),
+ "Failed to find image %s in images. Got images %s" %
+ (self.snapshot1_id, images))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_filter_by_server_ref(self):
@@ -106,11 +140,11 @@
resp, images = self.client.list_images(params)
self.assertFalse(any([i for i in images
- if i['id'] == self.image1_id]))
+ if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
- if i['id'] == self.image2_id]))
+ if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
- if i['id'] == self.image3_id]))
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_filter_by_type(self):
@@ -118,10 +152,14 @@
params = {'type': 'snapshot'}
resp, images = self.client.list_images(params)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image_ref]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_limit_results(self):
@@ -184,11 +222,11 @@
resp, images = self.client.list_images_with_detail(params)
self.assertFalse(any([i for i in images
- if i['id'] == self.image1_id]))
+ if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
- if i['id'] == self.image2_id]))
+ if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
- if i['id'] == self.image3_id]))
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_type(self):
@@ -197,10 +235,14 @@
resp, images = self.client.list_images_with_detail(params)
resp, image4 = self.client.get_image(self.image_ref)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image_ref]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_changes_since(self):
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index f66020c..9d39c9f 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -203,11 +203,13 @@
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
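+ # Limit the status assertion to the servers created by this test;
+ # the listing may include servers from other concurrently running
+ # tests.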
+ test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
- self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])
+ self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
+ if x['id'] in test_ids])
@test.attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
diff --git a/tempest/api/compute/v3/admin/test_hypervisor.py b/tempest/api/compute/v3/admin/test_hypervisor.py
index f3397a8..9a23789 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor.py
@@ -83,7 +83,27 @@
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
- resp, uptime = self.client.get_hypervisor_uptime(hypers[0]['id'])
+ # Ironic will register each baremetal node as a 'hypervisor',
+ # so the hypervisor list can contain many hypervisors of type
+ # 'ironic'. If they are ALL ironic, skip this test since ironic
+ # doesn't support hypervisor uptime. Otherwise, remove them
+ # from the list of hypervisors to test.
+ ironic_only = True
+ hypers_without_ironic = []
+ for hyper in hypers:
+ resp, details = (self.client.
+ get_hypervisor_show_details(hyper['id']))
+ self.assertEqual(200, resp.status)
+ if details['hypervisor_type'] != 'ironic':
+ hypers_without_ironic.append(hyper)
+ ironic_only = False
+
+ if ironic_only:
+ raise self.skipException(
+ "Ironic does not support hypervisor uptime")
+
+ resp, uptime = self.client.get_hypervisor_uptime(
+ hypers_without_ironic[0]['id'])
self.assertEqual(200, resp.status)
self.assertTrue(len(uptime) > 0)
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index ab0e83a..cfb5a3d 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -75,16 +75,16 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_node_group_template(name, plugin_name,
- hadoop_version,
- node_processes,
- flavor_id,
- node_configs,
- **kwargs)
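+ # Response status checking is handled in the client, so only the
+ # response body is returned here.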
+ _, resp_body = cls.client.create_node_group_template(name, plugin_name,
+ hadoop_version,
+ node_processes,
+ flavor_id,
+ node_configs,
+ **kwargs)
# store id of created node group template
- cls._node_group_templates.append(body['id'])
+ cls._node_group_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_cluster_template(cls, name, plugin_name, hadoop_version,
@@ -95,15 +95,15 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_cluster_template(name, plugin_name,
- hadoop_version,
- node_groups,
- cluster_configs,
- **kwargs)
+ _, resp_body = cls.client.create_cluster_template(name, plugin_name,
+ hadoop_version,
+ node_groups,
+ cluster_configs,
+ **kwargs)
# store id of created cluster template
- cls._cluster_templates.append(body['id'])
+ cls._cluster_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_data_source(cls, name, type, url, **kwargs):
@@ -113,11 +113,11 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+ _, resp_body = cls.client.create_data_source(name, type, url, **kwargs)
# store id of created data source
- cls._data_sources.append(body['id'])
+ cls._data_sources.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_job_binary_internal(cls, name, data):
@@ -126,11 +126,11 @@
It returns created object. All resources created in this method will
be automatically removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary_internal(name, data)
+ _, resp_body = cls.client.create_job_binary_internal(name, data)
# store id of created job binary internal
- cls._job_binary_internals.append(body['id'])
+ cls._job_binary_internals.append(resp_body['id'])
- return resp, body
+ return resp_body
def create_job_binary(cls, name, url, extra=None, **kwargs):
"""Creates watched job binary with specified params.
@@ -139,8 +139,8 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary(name, url, extra, **kwargs)
+ _, resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
# store id of created job binary
- cls._job_binaries.append(body['id'])
+ cls._job_binaries.append(resp_body['id'])
- return resp, body
+ return resp_body
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index ad9ed2a..ff67c1c 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -39,7 +39,7 @@
}
}
}
- resp_body = cls.create_node_group_template(**node_group_template)[1]
+ resp_body = cls.create_node_group_template(**node_group_template)
node_group_template_id = resp_body['id']
cls.full_cluster_template = {
@@ -95,23 +95,22 @@
def _create_cluster_template(self, template_name=None):
"""Creates Cluster Template with optional name specified.
- It creates template and ensures response status, template name and
- response body. Returns id and name of created template.
+ It creates a template and validates the template name and response
+ body. Returns the id and name of the created template.
"""
if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-cluster-template')
# create cluster template
- resp, body = self.create_cluster_template(template_name,
- **self.full_cluster_template)
+ resp_body = self.create_cluster_template(template_name,
+ **self.full_cluster_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.cluster_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.cluster_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_cluster_template_create(self):
@@ -122,8 +121,7 @@
template_info = self._create_cluster_template()
# check for cluster template in list
- resp, templates = self.client.list_cluster_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_cluster_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -133,16 +131,14 @@
template_id, template_name = self._create_cluster_template()
# check cluster template fetch by id
- resp, template = self.client.get_cluster_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_cluster_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.cluster_template, template)
@test.attr(type='smoke')
def test_cluster_template_delete(self):
- template_id = self._create_cluster_template()[0]
+ template_id, _ = self._create_cluster_template()
# delete the cluster template by id
- resp = self.client.delete_cluster_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_cluster_template(template_id)
# TODO(ylobankov): check that cluster template is really deleted
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index 345153b..aae56c4 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -48,65 +48,59 @@
def _create_data_source(self, source_body, source_name=None):
"""Creates Data Source with optional name specified.
- It creates a link to input-source file (it may not exist) and ensures
- response status and source name. Returns id and name of created source.
+ It creates a link to an input-source file (which may not exist) and
+ validates the source name and response body. Returns the id and name
+ of the created source.
"""
if not source_name:
# generate random name if it's not specified
source_name = data_utils.rand_name('sahara-data-source')
# create data source
- resp, body = self.create_data_source(source_name, **source_body)
+ resp_body = self.create_data_source(source_name, **source_body)
# ensure that source created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(source_name, body['name'])
+ self.assertEqual(source_name, resp_body['name'])
if source_body['type'] == 'swift':
source_body = self.swift_data_source
- self.assertDictContainsSubset(source_body, body)
+ self.assertDictContainsSubset(source_body, resp_body)
- return body['id'], source_name
+ return resp_body['id'], source_name
def _list_data_sources(self, source_info):
# check for data source in list
- resp, sources = self.client.list_data_sources()
- self.assertEqual(200, resp.status)
+ _, sources = self.client.list_data_sources()
sources_info = [(source['id'], source['name']) for source in sources]
self.assertIn(source_info, sources_info)
def _get_data_source(self, source_id, source_name, source_body):
# check data source fetch by id
- resp, source = self.client.get_data_source(source_id)
- self.assertEqual(200, resp.status)
+ _, source = self.client.get_data_source(source_id)
self.assertEqual(source_name, source['name'])
self.assertDictContainsSubset(source_body, source)
- def _delete_data_source(self, source_id):
- # delete the data source by id
- resp = self.client.delete_data_source(source_id)[0]
- self.assertEqual(204, resp.status)
-
@test.attr(type='smoke')
def test_swift_data_source_create(self):
self._create_data_source(self.swift_data_source_with_creds)
@test.attr(type='smoke')
def test_swift_data_source_list(self):
- source_info = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_info = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._list_data_sources(source_info)
@test.attr(type='smoke')
def test_swift_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_id, source_name = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._get_data_source(source_id, source_name, self.swift_data_source)
@test.attr(type='smoke')
def test_swift_data_source_delete(self):
- source_id = self._create_data_source(
- self.swift_data_source_with_creds)[0]
- self._delete_data_source(source_id)
+ source_id, _ = (
+ self._create_data_source(self.swift_data_source_with_creds))
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_local_hdfs_data_source_create(self):
@@ -119,15 +113,17 @@
@test.attr(type='smoke')
def test_local_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.local_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.local_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.local_hdfs_data_source)
@test.attr(type='smoke')
def test_local_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.local_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.local_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_external_hdfs_data_source_create(self):
@@ -140,12 +136,14 @@
@test.attr(type='smoke')
def test_external_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.external_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.external_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.external_hdfs_data_source)
@test.attr(type='smoke')
def test_external_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.external_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.external_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 689c1fe..15ee145 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -40,8 +40,8 @@
name = data_utils.rand_name('sahara-internal-job-binary')
cls.job_binary_data = 'Some script may be data'
- job_binary_internal = cls.create_job_binary_internal(
- name, cls.job_binary_data)[1]
+ job_binary_internal = (
+ cls.create_job_binary_internal(name, cls.job_binary_data))
cls.internal_db_job_binary = {
'url': 'internal-db://%s' % job_binary_internal['id'],
'description': 'Test job binary',
@@ -50,26 +50,25 @@
def _create_job_binary(self, binary_body, binary_name=None):
"""Creates Job Binary with optional name specified.
- It creates a link to data (jar, pig files, etc.) and ensures response
- status, job binary name and response body. Returns id and name of
- created job binary. Data may not exist when using Swift
- as data storage. In other cases data must exist in storage.
+ It creates a link to data (jar, pig files, etc.) and validates the job
+ binary name and response body. Returns the id and name of the created
+ job binary. The data may not exist when Swift is used as the data
+ storage; in other cases the data must exist in storage.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary')
# create job binary
- resp, body = self.create_job_binary(binary_name, **binary_body)
+ resp_body = self.create_job_binary(binary_name, **binary_body)
# ensure that binary created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
if 'swift' in binary_body['url']:
binary_body = self.swift_job_binary
- self.assertDictContainsSubset(binary_body, body)
+ self.assertDictContainsSubset(binary_body, resp_body)
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_swift_job_binary_create(self):
@@ -80,30 +79,27 @@
binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_swift_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.swift_job_binary_with_extra)
+ binary_id, binary_name = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.swift_job_binary, binary)
@test.attr(type='smoke')
def test_swift_job_binary_delete(self):
- binary_id = self._create_job_binary(
- self.swift_job_binary_with_extra)[0]
+ binary_id, _ = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_internal_db_job_binary_create(self):
@@ -114,35 +110,31 @@
binary_info = self._create_job_binary(self.internal_db_job_binary)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_internal_db_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.internal_db_job_binary)
+ binary_id, binary_name = (
+ self._create_job_binary(self.internal_db_job_binary))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.internal_db_job_binary, binary)
@test.attr(type='smoke')
def test_internal_db_job_binary_delete(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_job_binary_get_data(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# get data of job binary by id
- resp, data = self.client.get_job_binary_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_data(binary_id)
self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 6d59177..45e1140 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -29,23 +29,22 @@
def _create_job_binary_internal(self, binary_name=None):
"""Creates Job Binary Internal with optional name specified.
- It puts data into Sahara database and ensures response status and
- job binary internal name. Returns id and name of created job binary
- internal.
+ It puts data into the Sahara database and validates the job binary
+ internal name. Returns the id and name of the created job binary
+ internal.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary-internal')
# create job binary internal
- resp, body = self.create_job_binary_internal(
- binary_name, self.job_binary_internal_data)
+ resp_body = (
+ self.create_job_binary_internal(binary_name,
+ self.job_binary_internal_data))
# ensure that job binary internal created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_job_binary_internal_create(self):
@@ -56,8 +55,7 @@
binary_info = self._create_job_binary_internal()
# check for job binary internal in list
- resp, binaries = self.client.list_job_binary_internals()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binary_internals()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@@ -66,23 +64,20 @@
binary_id, binary_name = self._create_job_binary_internal()
# check job binary internal fetch by id
- resp, binary = self.client.get_job_binary_internal(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary_internal(binary_id)
self.assertEqual(binary_name, binary['name'])
@test.attr(type='smoke')
def test_job_binary_internal_delete(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# delete the job binary internal by id
- resp = self.client.delete_job_binary_internal(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary_internal(binary_id)
@test.attr(type='smoke')
def test_job_binary_internal_get_data(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# get data of job binary internal by id
- resp, data = self.client.get_job_binary_internal_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_internal_data(binary_id)
self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 04f98b4..c2c0075 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -43,7 +43,7 @@
def _create_node_group_template(self, template_name=None):
"""Creates Node Group Template with optional name specified.
- It creates template and ensures response status and template name.
+ It creates a template and validates the template name and response body.
Returns id and name of created template.
"""
if not template_name:
@@ -51,15 +51,14 @@
template_name = data_utils.rand_name('sahara-ng-template')
# create node group template
- resp, body = self.create_node_group_template(
- template_name, **self.node_group_template)
+ resp_body = self.create_node_group_template(template_name,
+ **self.node_group_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.node_group_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.node_group_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_node_group_template_create(self):
@@ -70,8 +69,7 @@
template_info = self._create_node_group_template()
# check for node group template in list
- resp, templates = self.client.list_node_group_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_node_group_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -81,15 +79,13 @@
template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
- resp, template = self.client.get_node_group_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_node_group_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.node_group_template, template)
@test.attr(type='smoke')
def test_node_group_template_delete(self):
- template_id = self._create_node_group_template()[0]
+ template_id, _ = self._create_node_group_template()
# delete the node group template by id
- resp = self.client.delete_node_group_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_node_group_template(template_id)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index d643f23..9fd7a17 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -20,10 +20,9 @@
def _list_all_plugin_names(self):
"""Returns all enabled plugin names.
- It ensures response status and main plugins availability.
+ It checks that the main plugins are available.
"""
- resp, plugins = self.client.list_plugins()
- self.assertEqual(200, resp.status)
+ _, plugins = self.client.list_plugins()
plugins_names = [plugin['name'] for plugin in plugins]
self.assertIn('vanilla', plugins_names)
self.assertIn('hdp', plugins_names)
@@ -37,14 +36,12 @@
@test.attr(type='smoke')
def test_plugin_get(self):
for plugin_name in self._list_all_plugin_names():
- resp, plugin = self.client.get_plugin(plugin_name)
- self.assertEqual(200, resp.status)
+ _, plugin = self.client.get_plugin(plugin_name)
self.assertEqual(plugin_name, plugin['name'])
for plugin_version in plugin['versions']:
- resp, detailed_plugin = self.client.get_plugin(plugin_name,
- plugin_version)
- self.assertEqual(200, resp.status)
+ _, detailed_plugin = self.client.get_plugin(plugin_name,
+ plugin_version)
self.assertEqual(plugin_name, detailed_plugin['name'])
# check that required image tags contains name and version
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 1548f89..6beb8f2 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -40,7 +40,7 @@
cls.setup_endpoints = list()
for i in range(2):
region = data_utils.rand_name('region')
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
interface = 'public'
resp, endpoint = cls.client.create_endpoint(
cls.service_id, interface, url, region=region, enabled=True)
@@ -69,7 +69,7 @@
@test.attr(type='gate')
def test_create_list_delete_endpoint(self):
region = data_utils.rand_name('region')
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
interface = 'public'
resp, endpoint =\
self.client.create_endpoint(self.service_id, interface, url,
@@ -97,7 +97,7 @@
# Creating an endpoint so as to check update endpoint
# with new values
region1 = data_utils.rand_name('region')
- url1 = data_utils.rand_name('url')
+ url1 = data_utils.rand_url()
interface1 = 'public'
resp, endpoint_for_update =\
self.client.create_endpoint(self.service_id, interface1,
@@ -114,7 +114,7 @@
self.service_ids.append(service2['id'])
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
- url2 = data_utils.rand_name('url')
+ url2 = data_utils.rand_url()
interface2 = 'internal'
resp, endpoint = \
self.client.update_endpoint(endpoint_for_update['id'],
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 1d63cce..d728b1d 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -49,7 +49,7 @@
def test_create_with_enabled_False(self):
# Enabled should be a boolean, not a string like 'False'
interface = 'public'
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
region = data_utils.rand_name('region')
self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
self.service_id, interface, url, region=region,
@@ -59,7 +59,7 @@
def test_create_with_enabled_True(self):
# Enabled should be a boolean, not a string like 'True'
interface = 'public'
- url = data_utils.rand_name('url')
+ url = data_utils.rand_url()
region = data_utils.rand_name('region')
self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
self.service_id, interface, url, region=region,
@@ -69,7 +69,7 @@
# Create an endpoint
region1 = data_utils.rand_name('region')
- url1 = data_utils.rand_name('url')
+ url1 = data_utils.rand_url()
interface1 = 'public'
resp, endpoint_for_update = (
self.client.create_endpoint(self.service_id, interface1,
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
new file mode 100644
index 0000000..497c5ea
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -0,0 +1,100 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ def _list_users_with_params(self, params, key, expected, not_expected):
+ # Helper method to list users filtered by params and to
+ # assert the response based on expected and not_expected
+ # expected: user expected in the list response
+ # not_expected: user that should not be present in the list response
+ _, body = self.client.get_users(params)
+ self.assertIn(expected[key], map(lambda x: x[key], body))
+ self.assertNotIn(not_expected[key],
+ map(lambda x: x[key], body))
+
+ @classmethod
+ def setUpClass(cls):
+ super(UsersV3TestJSON, cls).setUpClass()
+ alt_user = data_utils.rand_name('test_user')
+ alt_password = data_utils.rand_name('pass')
+ cls.alt_email = alt_user + '@testmail.tm'
+ cls.data.setup_test_domain()
+ # Create user with Domain
+ u1_name = data_utils.rand_name('test_user')
+ _, cls.domain_enabled_user = cls.client.create_user(
+ u1_name, password=alt_password,
+ email=cls.alt_email, domain_id=cls.data.domain['id'])
+ cls.data.v3_users.append(cls.domain_enabled_user)
+ # Create default not enabled user
+ u2_name = data_utils.rand_name('test_user')
+ _, cls.non_domain_enabled_user = cls.client.create_user(
+ u2_name, password=alt_password,
+ email=cls.alt_email, enabled=False)
+ cls.data.v3_users.append(cls.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_user_domains(self):
+ # List users with domain
+ params = {'domain_id': self.data.domain['id']}
+ self._list_users_with_params(params, 'domain_id',
+ self.domain_enabled_user,
+ self.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users_with_not_enabled(self):
+ # List the users that are not enabled
+ params = {'enabled': False}
+ self._list_users_with_params(params, 'enabled',
+ self.non_domain_enabled_user,
+ self.domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users_with_name(self):
+ # List users with name
+ params = {'name': self.domain_enabled_user['name']}
+ self._list_users_with_params(params, 'name',
+ self.domain_enabled_user,
+ self.non_domain_enabled_user)
+
+ @test.attr(type='gate')
+ def test_list_users(self):
+ # List users
+ _, body = self.client.get_users()
+ fetched_ids = [u['id'] for u in body]
+ missing_users = [u['id'] for u in self.data.v3_users
+ if u['id'] not in fetched_ids]
+ self.assertEqual(0, len(missing_users),
+ "Failed to find user %s in fetched list" %
+ ', '.join(m_user for m_user in missing_users))
+
+ @test.attr(type='gate')
+ def test_get_user(self):
+ # Get a user detail
+ _, user = self.client.get_user(self.data.v3_users[0]['id'])
+ self.assertEqual(self.data.v3_users[0]['id'], user['id'])
+ self.assertEqual(self.data.v3_users[0]['name'], user['name'])
+ self.assertEqual(self.alt_email, user['email'])
+ self.assertEqual(self.data.domain['id'], user['domain_id'])
+
+
+class UsersV3TestXML(UsersV3TestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index fed5171..1561a6e 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -150,7 +150,6 @@
self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
role['links']['self'])
- @test.skip_because(bug='1334368')
def check_trust_roles(self):
# Check we find the delegated role
_, roles_get = self.trustor_client.get_trust_roles(
@@ -164,12 +163,6 @@
_, role_get = self.trustor_client.check_trust_role(
self.trust_id, self.delegated_role_id)
- # This tempest two-step change conflicted with the change
- # moving response checking to the client. This test should be
- # re-enabled by removing the following assert and changing
- # the response code in tempest/services/identity/v3/json/
- # identity_client.py in the check_trust_role_method.
- # self.assertEqual('200', resp['status'])
# And that we don't find not_delegated_role
self.assertRaises(exceptions.NotFound,
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 558575e..3c25819 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -65,6 +65,28 @@
self.assertEqual('false', str(new_user_get['enabled']).lower())
@test.attr(type='gate')
+ def test_update_user_password(self):
+ # Create a user to verify the password update
+ u_name = data_utils.rand_name('user')
+ original_password = data_utils.rand_name('pass')
+ _, user = self.client.create_user(
+ u_name, password=original_password)
+ # Delete the User at the end all test methods
+ self.addCleanup(self.client.delete_user, user['id'])
+ # Update user with new password
+ new_password = data_utils.rand_name('pass1')
+ self.client.update_user_password(user['id'], new_password,
+ original_password)
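+ # Authenticate with the new password to confirm the update took effect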
+ resp, body = self.token.auth(user['id'], new_password)
+ self.assertEqual(201, resp.status)
+ subject_token = resp['x-subject-token']
+ # Perform GET Token to verify and confirm password is updated
+ _, token_details = self.client.get_token(subject_token)
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user['id'])
+ self.assertEqual(token_details['user']['name'], u_name)
+
+ @test.attr(type='gate')
def test_list_user_projects(self):
# List the projects that a user has access upon
assigned_project_ids = list()
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 0991576..8eb7d33 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -121,6 +121,7 @@
self.v3_users = []
self.projects = []
self.v3_roles = []
+ self.domains = []
@property
def test_credentials(self):
@@ -185,6 +186,15 @@
_, self.v3_role = self.client.create_role(self.test_role)
self.v3_roles.append(self.v3_role)
+ def setup_test_domain(self):
+ """Set up a test domain."""
+ self.test_domain = data_utils.rand_name('test_domain')
+ self.test_description = data_utils.rand_name('desc')
+ _, self.domain = self.client.create_domain(
+ name=self.test_domain,
+ description=self.test_description)
+ self.domains.append(self.domain)
+
def teardown_all(self):
for user in self.users:
self.client.delete_user(user['id'])
@@ -198,3 +208,6 @@
self.client.delete_project(v3_project['id'])
for v3_role in self.v3_roles:
self.client.delete_role(v3_role['id'])
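+ # A domain must be disabled before Keystone allows it to be deleted.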
+ for domain in self.domains:
+ self.client.update_domain(domain['id'], enabled=False)
+ self.client.delete_domain(domain['id'])
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ae777eb..4226815 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -86,7 +86,8 @@
# Verifying deletion
_, images = self.client.image_list()
- self.assertNotIn(image_id, images)
+ images_id = [item['id'] for item in images]
+ self.assertNotIn(image_id, images_id)
@test.attr(type='gate')
def test_update_image(self):
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index d1a8faf..9fa54b1 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -46,8 +46,7 @@
raise cls.skipException(msg)
cls.identity_admin_client = cls.os_adm.identity_client
- @test.attr(type='gate')
- def test_quotas(self):
+ def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
@@ -56,14 +55,15 @@
description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+
# Change quotas for tenant
- new_quotas = {'network': 0, 'security_group': 0}
resp, quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
self.assertEqual('200', resp['status'])
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Confirm our tenant is listed among tenants with non default quotas
resp, non_default_quotas = self.admin_client.list_quotas()
self.assertEqual('200', resp['status'])
@@ -72,12 +72,14 @@
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
+
+ # Confirm from API quotas were changed as requested for tenant
resp, quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
self.assertEqual('200', resp['status'])
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Reset quotas to default and confirm
resp, body = self.admin_client.reset_quotas(tenant_id)
self.assertEqual('204', resp['status'])
@@ -86,49 +88,14 @@
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+ @test.attr(type='gate')
+ def test_quotas(self):
+ new_quotas = {'network': 0, 'security_group': 0}
+ self._check_quotas(new_quotas)
+
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
- # Add a tenant to conduct the test
- test_tenant = data_utils.rand_name('test_tenant_')
- test_description = data_utils.rand_name('desc_')
- _, tenant = self.identity_admin_client.create_tenant(
- name=test_tenant,
- description=test_description)
- tenant_id = tenant['id']
- self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
- # Change lbaas quotas for tenant
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
-
- resp, quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
- self.assertEqual('200', resp['status'])
- self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Confirm our tenant is listed among tenants with non default quotas
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- found = False
- for qs in non_default_quotas['quotas']:
- if qs['tenant_id'] == tenant_id:
- found = True
- self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
- resp, quota_set = self.admin_client.show_quotas(tenant_id)
- quota_set = quota_set['quota']
- self.assertEqual('200', resp['status'])
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Reset quotas to default and confirm
- resp, body = self.admin_client.reset_quotas(tenant_id)
- self.assertEqual('204', resp['status'])
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- for q in non_default_quotas['quotas']:
- self.assertNotEqual(tenant_id, q['tenant_id'])
+ self._check_quotas(new_quotas)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 446f4ab..531df2d 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -11,6 +11,7 @@
# under the License.
import os.path
+import yaml
from tempest import clients
from tempest.common.utils import data_utils
@@ -84,11 +85,8 @@
pass
for stack_identifier in cls.stacks:
- try:
- cls.client.wait_for_stack_status(
- stack_identifier, 'DELETE_COMPLETE')
- except exceptions.NotFound:
- pass
+ cls.client.wait_for_stack_status(
+ stack_identifier, 'DELETE_COMPLETE')
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
@@ -125,7 +123,7 @@
pass
@classmethod
- def load_template(cls, name, ext='yaml'):
+ def read_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
@@ -134,6 +132,14 @@
return content
@classmethod
+ def load_template(cls, name, ext='yaml'):
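+ # Parse the template file as YAML and return a dict; read_template
+ # above returns the raw file contents instead.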
+ loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+ fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+ with open(fullpath, "r") as f:
+ return yaml.safe_load(f)
+
+ @classmethod
def tearDownClass(cls):
cls._clear_stacks()
cls._clear_keypairs()
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 3911e72..96e1c50 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -28,7 +28,7 @@
def test_environment_parameter(self):
"""Test passing a stack parameter via the environment."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('random_string')
+ template = self.read_template('random_string')
environment = {'parameters': {'random_length': 20}}
stack_identifier = self.create_stack(stack_name, template,
@@ -56,7 +56,7 @@
'''
environment = {'resource_registry':
{'My:Random::String': 'my_random.yaml'}}
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
environment=environment,
@@ -65,7 +65,10 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
@test.attr(type='gate')
def test_files_provider_resource(self):
@@ -82,7 +85,7 @@
random_value:
value: {get_attr: [random, random_value]}
'''
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
files=files)
@@ -90,4 +93,7 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index e92b945..27c6196 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -39,7 +39,7 @@
raise cls.skipException("Neutron support is required")
cls.network_client = os.network_client
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('neutron_basic')
+ template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 585c90b..a97c561 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('non_empty_stack')
+ template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
# create the stack
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index a81a540..e22a08b 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -23,12 +23,14 @@
class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
_tpl_type = 'yaml'
+ _resource = 'resources'
+ _type = 'type'
@classmethod
def setUpClass(cls):
super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('nova_keypair', ext=cls._tpl_type)
+ template = cls.read_template('nova_keypair', ext=cls._tpl_type)
# create the stack, avoid any duplicated key.
cls.stack_identifier = cls.create_stack(
@@ -49,8 +51,15 @@
@test.attr(type='slow')
def test_created_resources(self):
"""Verifies created keypair resource."""
- resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
- ('KeyPairDontSavePrivate', 'OS::Nova::KeyPair')]
+
+ nova_keypair_template = self.load_template('nova_keypair',
+ ext=self._tpl_type)
+ template_resources = nova_keypair_template[self._resource]
+ resources = [('KeyPairSavePrivate',
+ template_resources['KeyPairSavePrivate'][self._type]),
+ ('KeyPairDontSavePrivate',
+ template_resources['KeyPairDontSavePrivate'][self._type])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
@@ -85,3 +94,5 @@
class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
_tpl_type = 'json'
+ _resource = 'Resources'
+ _type = 'Type'
diff --git a/tempest/api/orchestration/stacks/test_soft_conf.py b/tempest/api/orchestration/stacks/test_soft_conf.py
new file mode 100644
index 0000000..8903d4c
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_soft_conf.py
@@ -0,0 +1,163 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class TestSoftwareConfig(base.BaseOrchestrationTest):
+
+ def setUp(self):
+ super(TestSoftwareConfig, self).setUp()
+ self.configs = []
+ # Add 2 sets of software configuration
+ self.configs.append(self._config_create('a'))
+ self.configs.append(self._config_create('b'))
+ # Create a deployment using config a's id
+ self._deployment_create(self.configs[0]['id'])
+
+ def _config_create(self, suffix):
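+ # Build a minimal script-group software config, create it through
+ # the API and register a cleanup that also verifies the deletion.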
+ configuration = {'group': 'script',
+ 'inputs': [],
+ 'outputs': [],
+ 'options': {}}
+ configuration['name'] = 'heat_soft_config_%s' % suffix
+ configuration['config'] = '#!/bin/bash echo init-%s' % suffix
+ api_config = self.client.create_software_config(**configuration)
+ configuration['id'] = api_config['software_config']['id']
+ self.addCleanup(self._config_delete, configuration['id'])
+ self._validate_config(configuration, api_config)
+ return configuration
+
+ def _validate_config(self, configuration, api_config):
+ # Assert all expected keys are present with matching data
+ for k in configuration.keys():
+ self.assertEqual(configuration[k],
+ api_config['software_config'][k])
+
+ def _deployment_create(self, config_id):
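+ # The deployment targets a randomly named dummy server and uses
+ # NO_SIGNAL, so no real agent has to report back.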
+ self.server_id = data_utils.rand_name('dummy-server')
+ self.action = 'ACTION_0'
+ self.status = 'STATUS_0'
+ self.input_values = {}
+ self.output_values = []
+ self.status_reason = 'REASON_0'
+ self.signal_transport = 'NO_SIGNAL'
+ self.deployment = self.client.create_software_deploy(
+ self.server_id, config_id, self.action, self.status,
+ self.input_values, self.output_values, self.status_reason,
+ self.signal_transport)
+ self.deployment_id = self.deployment['software_deployment']['id']
+ self.addCleanup(self._deployment_delete, self.deployment_id)
+
+ def _deployment_delete(self, deploy_id):
+ self.client.delete_software_deploy(deploy_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_deploy,
+ deploy_id)
+
+ def _config_delete(self, config_id):
+ self.client.delete_software_config(config_id)
+ # Testing that it is really gone
+ self.assertRaises(
+ exceptions.NotFound, self.client.get_software_config, config_id)
+
+ @test.attr(type='smoke')
+ def test_get_software_config(self):
+ """Testing software config get."""
+ for conf in self.configs:
+ api_config = self.client.get_software_config(conf['id'])
+ self._validate_config(conf, api_config)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_list(self):
+ """Getting a list of all deployments"""
+ deploy_list = self.client.get_software_deploy_list()
+ deploy_ids = [deploy['id'] for deploy in
+ deploy_list['software_deployments']]
+ self.assertIn(self.deployment_id, deploy_ids)
+
+ @test.attr(type='smoke')
+ def test_get_deployment_metadata(self):
+ """Testing deployment metadata get"""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ conf_ids = [conf['id'] for conf in metadata['metadata']]
+ self.assertIn(self.configs[0]['id'], conf_ids)
+
+ def _validate_deployment(self, action, status, reason, config_id):
+ deployment = self.client.get_software_deploy(self.deployment_id)
+ self.assertEqual(action, deployment['software_deployment']['action'])
+ self.assertEqual(status, deployment['software_deployment']['status'])
+ self.assertEqual(reason,
+ deployment['software_deployment']['status_reason'])
+ self.assertEqual(config_id,
+ deployment['software_deployment']['config_id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_create_validate(self):
+ """Testing software deployment was created as expected."""
+ # Asserting that all fields were created
+ self.assert_fields_in_dict(
+ self.deployment['software_deployment'], 'action', 'config_id',
+ 'id', 'input_values', 'output_values', 'server_id', 'status',
+ 'status_reason')
+ # Testing get for this deployment and verifying parameters
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[0]['id'])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_no_metadata_change(self):
+ """Testing software deployment update without metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ # Updating values without changing the configuration ID
+ new_action = 'ACTION_1'
+ new_status = 'STATUS_1'
+ new_reason = 'REASON_1'
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[0]['id'],
+ new_action, new_status, self.input_values, self.output_values,
+ new_reason, self.signal_transport)
+ # Verifying get and that the deployment was updated as expected
+ self._validate_deployment(new_action, new_status,
+ new_reason, self.configs[0]['id'])
+
+ # Metadata should not be changed at this point
+ test_metadata = self.client.get_software_deploy_meta(self.server_id)
+ for key in metadata['metadata'][0]:
+ self.assertEqual(
+ metadata['metadata'][0][key],
+ test_metadata['metadata'][0][key])
+
+ @test.attr(type='smoke')
+ def test_software_deployment_update_with_metadata_change(self):
+ """Testing software deployment update with metadata change."""
+ metadata = self.client.get_software_deploy_meta(self.server_id)
+ self.client.update_software_deploy(
+ self.deployment_id, self.server_id, self.configs[1]['id'],
+ self.action, self.status, self.input_values,
+ self.output_values, self.status_reason, self.signal_transport)
+ self._validate_deployment(self.action, self.status,
+ self.status_reason, self.configs[1]['id'])
+ # Metadata should now be changed
+ new_metadata = self.client.get_software_deploy_meta(self.server_id)
+ # It's enough to compare the deployment IDs in this case
+ meta_id = metadata['metadata'][0]['id']
+ test_id = new_metadata['metadata'][0]['id']
+ self.assertNotEqual(meta_id, test_id)
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 6d53fb2..adab8c3 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -30,7 +30,7 @@
def setUpClass(cls):
super(SwiftResourcesTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('swift_basic')
+ template = cls.read_template('swift_basic')
os = clients.Manager()
if not CONF.service_available.swift:
raise cls.skipException("Swift support is required")
@@ -49,8 +49,11 @@
def test_created_resources(self):
"""Created stack should be in the list of existing stacks."""
- resources = [('SwiftContainer', 'OS::Swift::Container'),
- ('SwiftContainerWebsite', 'OS::Swift::Container')]
+ swift_basic_template = self.load_template('swift_basic')
+ template_resources = swift_basic_template['resources']
+ resources = [('SwiftContainer',
+ template_resources['SwiftContainer']['type']),
+ ('SwiftContainerWebsite',
+ template_resources['SwiftContainerWebsite']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name)
self.assertIsInstance(resource, dict)
@@ -84,10 +87,9 @@
self.assertIn(h, headers)
def test_metadata(self):
- metadatas = {
- "web-index": "index.html",
- "web-error": "error.html"
- }
+ swift_basic_template = self.load_template('swift_basic')
+ metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
+ 'properties']['X-Container-Meta']
swcont_website = self.test_resources.get(
'SwiftContainerWebsite')['physical_resource_id']
headers, _ = self.container_client.list_container_metadata(
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index 5ac2a8d..d422752 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -31,43 +31,44 @@
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
- def _cinder_verify(self, volume_id):
+ def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
resp, volume = self.volumes_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
self.assertEqual('available', volume.get('status'))
- self.assertEqual(1, volume.get('size'))
- self.assertEqual('a descriptive description',
- volume.get('display_description'))
- self.assertEqual('volume_name',
- volume.get('display_name'))
+ volume_properties = template['resources']['volume']['properties']
+ self.assertEqual(volume_properties['size'], volume.get('size'))
+ self.assertEqual(volume_properties['description'],
+ volume.get('display_description'))
+ self.assertEqual(volume_properties['name'],
+ volume.get('display_name'))
- def _outputs_verify(self, stack_identifier):
+ def _outputs_verify(self, stack_identifier, template):
self.assertEqual('available',
self.get_stack_output(stack_identifier, 'status'))
- self.assertEqual('1',
- self.get_stack_output(stack_identifier, 'size'))
- self.assertEqual('a descriptive description',
- self.get_stack_output(stack_identifier,
- 'display_description'))
- self.assertEqual('volume_name',
- self.get_stack_output(stack_identifier,
- 'display_name'))
+ volume_properties = template['resources']['volume']['properties']
+ self.assertEqual(str(volume_properties['size']),
+ self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual(volume_properties['description'],
+ self.get_stack_output(stack_identifier,
+ 'display_description'))
+ self.assertEqual(volume_properties['name'],
+ self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic')
+ template = self.read_template('cinder_basic')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
- self._cinder_verify(volume_id)
+ cinder_basic_template = self.load_template('cinder_basic')
+ self._cinder_verify(volume_id, cinder_basic_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, cinder_basic_template)
# Delete the stack and ensure the volume is gone
self.client.delete_stack(stack_identifier)
@@ -86,21 +87,22 @@
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic_delete_retain')
+ template = self.read_template('cinder_basic_delete_retain')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
self.addCleanup(self._cleanup_volume, volume_id)
- self._cinder_verify(volume_id)
+ retain_template = self.load_template('cinder_basic_delete_retain')
+ self._cinder_verify(volume_id, retain_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, retain_template)
# Delete the stack and ensure the volume is *not* gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
- self._cinder_verify(volume_id)
+ self._cinder_verify(volume_id, retain_template)
# Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 2b422fd..b5b2bb1 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -34,19 +34,27 @@
cls.telemetry_client = os.telemetry_client
cls.servers_client = os.servers_client
cls.flavors_client = os.flavors_client
+ cls.image_client = os.image_client
+ cls.image_client_v2 = os.image_client_v2
cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
'disk.ephemeral.size']
+
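+ # Meters emitted as notifications by glance v1 and v2 operations.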
+ cls.glance_notifications = ['image.update', 'image.upload',
+ 'image.delete']
+
+ cls.glance_v2_notifications = ['image.download', 'image.serve']
+
cls.server_ids = []
cls.alarm_ids = []
+ cls.image_ids = []
@classmethod
def create_alarm(cls, **kwargs):
resp, body = cls.telemetry_client.create_alarm(
name=data_utils.rand_name('telemetry_alarm'),
type='threshold', **kwargs)
- if resp['status'] == '201':
- cls.alarm_ids.append(body['alarm_id'])
+ cls.alarm_ids.append(body['alarm_id'])
return resp, body
@classmethod
@@ -55,8 +63,15 @@
data_utils.rand_name('ceilometer-instance'),
CONF.compute.image_ref, CONF.compute.flavor_ref,
wait_until='ACTIVE')
- if resp['status'] == '202':
- cls.server_ids.append(body['id'])
+ cls.server_ids.append(body['id'])
+ return resp, body
+
+ @classmethod
+ def create_image(cls, client):
+ resp, body = client.create_image(
+ data_utils.rand_name('image'), container_format='bare',
+ disk_format='raw', visibility='private')
+ cls.image_ids.append(body['id'])
return resp, body
@staticmethod
@@ -71,6 +86,7 @@
def tearDownClass(cls):
cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
+ cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
cls.clear_isolated_creds()
super(BaseTelemetryTest, cls).tearDownClass()
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 148f5a3..2a170c7 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,6 +32,7 @@
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
+ @test.skip_because(bug="1336755")
def test_check_nova_notification(self):
resp, body = self.create_server()
@@ -42,6 +43,36 @@
for metric in self.nova_notifications:
self.await_samples(metric, query)
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
+ "Glance api v1 is disabled")
+ def test_check_glance_v1_notifications(self):
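+ # Create, update and delete an image; each operation should emit
+ # a matching notification sample.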
+ _, body = self.create_image(self.image_client)
+ self.image_client.update_image(body['id'], data='data')
+
+ query = 'resource', 'eq', body['id']
+
+ self.image_client.delete_image(body['id'])
+
+ for metric in self.glance_notifications:
+ self.await_samples(metric, query)
+
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
+ "Glance api v2 is disabled")
+ def test_check_glance_v2_notifications(self):
+ _, body = self.create_image(self.image_client_v2)
+
+ self.image_client_v2.store_image(body['id'], "file")
+ self.image_client_v2.get_image_file(body['id'])
+
+ query = 'resource', 'eq', body['id']
+
+ for metric in self.glance_v2_notifications:
+ self.await_samples(metric, query)
+
class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
_interface = 'xml'
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e79d23c..d451517 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -36,42 +36,55 @@
cls.volume_client = cls.os_adm.volumes_client
cls.volume_type_id_list = []
- cls.volume_id_list = []
+ cls.volume_id_list_with_prefix = []
+ cls.volume_id_list_without_prefix = []
- # Volume/Type creation (uses backend1_name)
- type1_name = data_utils.rand_name('Type-')
- vol1_name = data_utils.rand_name('Volume-')
- extra_specs1 = {"volume_backend_name": cls.backend1_name}
- resp, cls.type1 = cls.client.create_volume_type(
- type1_name, extra_specs=extra_specs1)
- cls.volume_type_id_list.append(cls.type1['id'])
-
- resp, cls.volume1 = cls.volume_client.create_volume(
- size=1, display_name=vol1_name, volume_type=type1_name)
- cls.volume_id_list.append(cls.volume1['id'])
- cls.volume_client.wait_for_volume_status(cls.volume1['id'],
- 'available')
+ # Volume/Type creation (uses volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, True)
if cls.backend1_name != cls.backend2_name:
# Volume/Type creation (uses backend2_name)
- type2_name = data_utils.rand_name('Type-')
- vol2_name = data_utils.rand_name('Volume-')
- extra_specs2 = {"volume_backend_name": cls.backend2_name}
- resp, cls.type2 = cls.client.create_volume_type(
- type2_name, extra_specs=extra_specs2)
- cls.volume_type_id_list.append(cls.type2['id'])
+ cls._create_type_and_volume(cls.backend2_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend2_name, True)
- resp, cls.volume2 = cls.volume_client.create_volume(
- size=1, display_name=vol2_name, volume_type=type2_name)
- cls.volume_id_list.append(cls.volume2['id'])
- cls.volume_client.wait_for_volume_status(cls.volume2['id'],
- 'available')
+ @classmethod
+ def _create_type_and_volume(cls, backend_name_key, with_prefix):
+ # Volume/Type creation
+ type_name = data_utils.rand_name('Type')
+ vol_name = data_utils.rand_name('Volume')
+ spec_key_with_prefix = "capabilities:volume_backend_name"
+ spec_key_without_prefix = "volume_backend_name"
+ if with_prefix:
+ extra_specs = {spec_key_with_prefix: backend_name_key}
+ else:
+ extra_specs = {spec_key_without_prefix: backend_name_key}
+ resp, cls.type = cls.client.create_volume_type(
+ type_name, extra_specs=extra_specs)
+ cls.volume_type_id_list.append(cls.type['id'])
+
+ resp, cls.volume = cls.volume_client.create_volume(
+ size=1, display_name=vol_name, volume_type=type_name)
+ cls.volume_client.wait_for_volume_status(
+ cls.volume['id'], 'available')
+ if with_prefix:
+ cls.volume_id_list_with_prefix.append(cls.volume['id'])
+ else:
+ cls.volume_id_list_without_prefix.append(cls.volume['id'])
@classmethod
def tearDownClass(cls):
# volumes deletion
- volume_id_list = getattr(cls, 'volume_id_list', [])
- for volume_id in volume_id_list:
+ vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
+ for volume_id in vid_prefix:
+ cls.volume_client.delete_volume(volume_id)
+ cls.volume_client.wait_for_resource_deletion(volume_id)
+
+ vid_no_pre = getattr(cls, 'volume_id_list_without_prefix', [])
+ for volume_id in vid_no_pre:
cls.volume_client.delete_volume(volume_id)
cls.volume_client.wait_for_resource_deletion(volume_id)
@@ -84,32 +97,57 @@
@test.attr(type='smoke')
def test_backend_name_reporting(self):
+ # get the volume created from the type without the prefix
+ volume_id = self.volume_id_list_without_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='smoke')
+ def test_backend_name_reporting_with_prefix(self):
+ # get the volume created from the type with the prefix
+ volume_id = self.volume_id_list_with_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+ # get the two volumes created from types without the prefix
+ volume1_id = self.volume_id_list_without_prefix[0]
+ volume2_id = self.volume_id_list_without_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction_with_prefix(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+ # get the two volumes created from types with the prefix
+ volume1_id = self.volume_id_list_with_prefix[0]
+ volume2_id = self.volume_id_list_with_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ def _test_backend_name_reporting_by_volume_id(self, volume_id):
# this test checks if os-vol-attr:host is populated correctly after
# the multi backend feature has been enabled
# if multi-backend is enabled: os-vol-attr:host should be like:
# host@backend_name
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
volume1_host = volume['os-vol-host-attr:host']
msg = ("multi-backend reporting incorrect values for volume %s" %
- self.volume1['id'])
+ volume_id)
self.assertTrue(len(volume1_host.split("@")) > 1, msg)
- @test.attr(type='gate')
- def test_backend_name_distinction(self):
+ def _test_backend_name_distinction(self, volume1_id, volume2_id):
# this test checks that the two volumes created at setUp don't
# belong to the same backend (if they do, then the
# volume backend distinction is not working properly)
- if self.backend1_name == self.backend2_name:
- raise self.skipException("backends configured with same name")
-
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume1_id)
volume1_host = volume['os-vol-host-attr:host']
- resp, volume = self.volume_client.get_volume(self.volume2['id'])
+ resp, volume = self.volume_client.get_volume(volume2_id)
volume2_host = volume['os-vol-host-attr:host']
msg = ("volumes %s and %s were created in the same backend" %
- (self.volume1['id'], self.volume2['id']))
+ (volume1_id, volume2_id))
self.assertNotEqual(volume1_host, volume2_host, msg)
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index ad0aa29..a16e425 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -48,50 +48,52 @@
}
}
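+# Schema for a single server entry, shared between the get/update
+# server responses and the detailed server list below.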
+common_show_server = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'image': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'flavor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'user_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'links': parameter_types.links,
+ 'addresses': parameter_types.addresses,
+ },
+ # NOTE(GMann): 'progress' attribute is present in the response
+ # only when server's status is one of the progress statuses
+ # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
+ # So it is not defined as 'required'.
+ 'required': ['id', 'name', 'status', 'image', 'flavor',
+ 'user_id', 'tenant_id', 'created', 'updated',
+ 'metadata', 'links', 'addresses']
+}
+
base_update_get_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'server': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'name': {'type': 'string'},
- 'status': {'type': 'string'},
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'flavor': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
- 'user_id': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'created': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'links': parameter_types.links,
- 'addresses': parameter_types.addresses,
- },
- # NOTE(GMann): 'progress' attribute is present in the response
- # only when server's status is one of the progress statuses
- # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
- # So it is not defined as 'required'.
- 'required': ['id', 'name', 'status', 'image', 'flavor',
- 'user_id', 'tenant_id', 'created', 'updated',
- 'metadata', 'links', 'addresses']
- }
+ 'server': common_show_server
},
'required': ['server']
}
@@ -179,3 +181,40 @@
'required': ['action', 'request_id', 'user_id', 'project_id',
'start_time', 'message']
}
+
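+# Schema for the per-action event list returned by the instance
+# actions API; 'traceback' may be null for successful events.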
+instance_action_events = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'event': {'type': 'string'},
+ 'start_time': {'type': 'string'},
+ 'finish_time': {'type': 'string'},
+ 'result': {'type': 'string'},
+ 'traceback': {'type': ['string', 'null']}
+ },
+ 'required': ['event', 'start_time', 'finish_time', 'result',
+ 'traceback']
+ }
+}
+
+common_get_instance_action = copy.deepcopy(common_instance_actions)
+
+common_get_instance_action['properties'].update({
+ 'events': instance_action_events})
+# 'events' is not always present in the response body, so it is
+# not defined as 'required'
+
+base_list_servers_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'servers': {
+ 'type': 'array',
+ 'items': common_show_server
+ }
+ },
+ 'required': ['servers']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index dc4054c..95c5760 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -240,3 +240,33 @@
'required': ['instanceActions']
}
}
+
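+# v2 extends the common instance action schema with 'instance_uuid'.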
+get_instance_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_instance_actions_object[
+ 'properties'].update({'instance_uuid': {'type': 'string'}})
+get_instance_actions_object['required'].extend(['instance_uuid'])
+
+get_instance_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'instanceAction': get_instance_actions_object
+ },
+ 'required': ['instanceAction']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'hostId': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): OS-DCF:diskConfig and accessIPv4/v6 are API
+# extensions, and some environments return a response
+# without these attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('hostId')
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 1af951f..541d3ff 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -26,7 +26,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
@@ -74,7 +74,7 @@
'availabilityZone': {'type': 'string'},
'createdAt': {'type': 'string'},
'displayDescription': {'type': ['string', 'null']},
- 'volumeType': {'type': 'string'},
+ 'volumeType': {'type': ['string', 'null']},
'snapshotId': {'type': ['string', 'null']},
'metadata': {'type': 'object'},
'size': {'type': 'integer'},
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index 3b50516..dc800cd 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -151,3 +151,33 @@
'required': ['server_actions']
}
}
+
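+# v3 exposes 'server_uuid' rather than the v2 'instance_uuid' field.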
+get_server_actions_object = copy.deepcopy(servers.common_get_instance_action)
+get_server_actions_object[
+ 'properties'].update({'server_uuid': {'type': 'string'}})
+get_server_actions_object['required'].extend(['server_uuid'])
+
+get_server_action = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server_action': get_server_actions_object
+ },
+ 'required': ['server_action']
+ }
+}
+
+list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'properties'].update({
+ 'addresses': addresses_v3,
+ 'host_id': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+ })
+# NOTE(GMann): os-access-ips:access_ip_v4/v6 are API extensions,
+# and some environments return a response without these
+# attributes. So they are not 'required'.
+list_servers_detail['response_body']['properties']['servers']['items'][
+ 'required'].append('host_id')
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
index dcd940b..bc18084 100644
--- a/tempest/cli/README.rst
+++ b/tempest/cli/README.rst
@@ -1,3 +1,5 @@
+.. _cli_field_guide:
+
Tempest Field Guide to CLI tests
================================
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index f462358..02f8c05 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -19,6 +19,7 @@
import tempest.cli.output_parser
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -121,7 +122,7 @@
cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
- cmd = shlex.split(cmd)
+ cmd = shlex.split(cmd.encode('utf-8'))
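+ # shlex.split cannot handle unicode input on python 2, so pass it
+ # utf-8 encoded bytes.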
result = ''
result_err = ''
stdout = subprocess.PIPE
@@ -130,10 +131,10 @@
cmd, stdout=stdout, stderr=stderr)
result, result_err = proc.communicate()
if not fail_ok and proc.returncode != 0:
- raise CommandFailed(proc.returncode,
- cmd,
- result,
- stderr=result_err)
+ raise exceptions.CommandFailed(proc.returncode,
+ cmd,
+ result,
+ result_err)
return result
def assertTableStruct(self, items, field_names):
@@ -146,11 +147,3 @@
self.assertTrue(lines[0].startswith(beginning),
msg=('Beginning of first line has invalid content: %s'
% lines[:3]))
-
-
-class CommandFailed(subprocess.CalledProcessError):
- # adds output attribute for python2.6
- def __init__(self, returncode, cmd, output, stderr=""):
- super(CommandFailed, self).__init__(returncode, cmd)
- self.output = output
- self.stderr = stderr
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 946b89e..04971c1 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -15,17 +15,17 @@
import logging
import re
-import subprocess
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyCinderClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyCinderClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Cinder CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +41,7 @@
super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
def test_cinder_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'this-does-not-exist')
@@ -66,7 +66,7 @@
'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 9869483..90cdc55 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Glance CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +41,7 @@
super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
def test_glance_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.glance,
'this-does-not-exist')
@@ -76,7 +76,7 @@
commands = set(commands)
wanted_commands = set(('image-create', 'image-delete', 'help',
'image-download', 'image-show', 'image-update',
- 'member-add', 'member-create', 'member-delete',
+ 'member-create', 'member-delete',
'member-list'))
self.assertFalse(wanted_commands - commands)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index dda65c1..9218fcd 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Keystone CLI client.
Checks return values and output of read-only commands.
@@ -35,7 +35,7 @@
"""
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.keystone,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 49d079e..87f6b67 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -14,10 +14,10 @@
# under the License.
import re
-import subprocess
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest import test
@@ -43,7 +43,7 @@
@test.attr(type='smoke')
def test_neutron_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
@@ -89,7 +89,7 @@
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
- except cli.CommandFailed as e:
+ except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index 1c1ddf1..7085cc9 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -27,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only python-novaclient test. This
@@ -49,7 +48,7 @@
super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'this-does-nova-exist')
@@ -86,11 +85,11 @@
self.nova('endpoints')
def test_admin_flavor_acces_list(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list')
# Failed to get access list for public flavor type
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list',
params='--flavor m1.tiny')
@@ -127,7 +126,7 @@
self.nova('list')
self.nova('list', params='--all-tenants 1')
self.nova('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index f1fee2e..dae0cf8 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -13,10 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -24,7 +23,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaManageTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only nova-manage test. This
@@ -48,7 +47,7 @@
super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.nova_manage,
'this-does-nova-exist')
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index f00dcae..2c6e0e2 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -14,10 +14,10 @@
# limitations under the License.
import logging
import re
-import subprocess
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -42,7 +42,7 @@
@test.attr(type='negative')
def test_sahara_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.sahara,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
index 6d6caa7..069a384 100644
--- a/tempest/cli/simple_read_only/test_swift.py
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -14,15 +14,15 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
-class SimpleReadOnlySwiftClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlySwiftClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Swift CLI client.
Checks return values and output of read-only commands.
@@ -38,7 +38,7 @@
super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
def test_swift_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(exceptions.CommandFailed,
self.swift,
'this-does-not-exist')
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 19e816b..c1a2e46 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -28,6 +28,7 @@
import argparse
import tempest.auth
+from tempest import config
from tempest import exceptions
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
@@ -169,7 +170,7 @@
def collect_users(users):
global USERS
- LOG.info("Creating users")
+ LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
@@ -202,6 +203,7 @@
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
+ LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
@@ -217,6 +219,9 @@
def check_objects(self):
"""Check that the objects created are still there."""
+ if 'objects' not in self.res:
+ return
+ LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
@@ -226,6 +231,9 @@
def check_servers(self):
"""Check that the servers are still up and running."""
+ if 'servers' not in self.res:
+ return
+ LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
@@ -236,12 +244,19 @@
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
- self.assertEqual(os.system("ping -c 1 " + addr), 0,
- "Server %s is not pingable at %s" % (
- server['name'], addr))
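+ # Retry the ping for up to 60 attempts, since a freshly booted
+ # server may take a while to start answering.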
+ for count in range(60):
+ return_code = os.system("ping -c1 " + addr)
+ if return_code == 0:
+ break
+ self.assertNotEqual(count, 59,
+ "Server %s is not pingable at %s" % (
+ server['name'], addr))
def check_volumes(self):
"""Check that the volumes are still there and attached."""
+ if 'volumes' not in self.res:
+ return
+ LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
found = _get_volume_by_name(client, volume['name'])
@@ -269,6 +284,8 @@
def create_objects(objects):
+ if not objects:
+ return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
@@ -293,6 +310,9 @@
def create_images(images):
+ if not images:
+ return
+ LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
@@ -300,6 +320,7 @@
r, body = client.images.image_list()
names = [x['name'] for x in body]
if image['name'] in names:
+ LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
@@ -355,15 +376,39 @@
def create_servers(servers):
+ if not servers:
+ return
+ LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
+ LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
- client.servers.create_server(server['name'], image_id, flavor_id)
+ resp, body = client.servers.create_server(server['name'], image_id,
+ flavor_id)
+ server_id = body['id']
+ client.servers.wait_for_server_status(server_id, 'ACTIVE')
+
+
+def destroy_servers(servers):
+ if not servers:
+ return
+ LOG.info("Destroying servers")
+ for server in servers:
+ client = client_for_user(server['owner'])
+
+ response = _get_server_by_name(client, server['name'])
+ if not response:
+ LOG.info("Server '%s' does not exist" % server['name'])
+ continue
+
+ client.servers.delete_server(response['id'])
+ client.servers.wait_for_server_termination(response['id'],
+ ignore_error=True)
#######################
@@ -424,6 +469,23 @@
# attach_volumes(RES['volumes'])
+def destroy_resources():
+ LOG.info("Destroying Resources")
+ # Destroy in inverse order of create
+
+ # Future
+ # detach_volumes
+ # destroy_volumes
+
+ destroy_servers(RES['servers'])
+ LOG.warn("Destroy mode incomplete")
+ # destroy_images
+ # destroy_objects
+
+ # destroy_users
+ # destroy_tenants
+
+
def get_options():
global OPTS
parser = argparse.ArgumentParser(
@@ -436,11 +498,17 @@
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
+
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
+ parser.add_argument(
+ '-c', '--config-file',
+ metavar='/etc/tempest.conf',
+ help='path to the javelin2 (tempest) config file')
+
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
@@ -460,6 +528,8 @@
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
+ if OPTS.config_file:
+ config.CONF.set_config_path(OPTS.config_file)
def setup_logging(debug=True):
@@ -487,15 +557,20 @@
if OPTS.mode == 'create':
create_resources()
+ # Make sure the resources we just created actually work
+ checker = JavelinCheck(USERS, RES)
+ checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
- LOG.warn("Destroy mode not yet implemented")
+ collect_users(RES['users'])
+ destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
+ LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0834cff..673da4f 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -29,13 +29,12 @@
CONF = config.CONF
RAW_HTTP = httplib2.Http()
-CONF_FILE = None
-OUTFILE = sys.stdout
+CONF_PARSER = None
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
- os.path.dirname(os.path.dirname(__file__))), "etc")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
@@ -46,14 +45,9 @@
def change_option(option, group, value):
- config_parse = moves.configparser.SafeConfigParser()
- config_parse.optionxform = str
- config_parse.readfp(CONF_FILE)
- if not config_parse.has_section(group):
- config_parse.add_section(group)
- config_parse.set(group, option, str(value))
- global OUTFILE
- config_parse.write(OUTFILE)
+ if not CONF_PARSER.has_section(group):
+ CONF_PARSER.add_section(group)
+ CONF_PARSER.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
@@ -288,6 +282,9 @@
if update:
change_option(codename_match[cfgname],
'service_available', True)
+ # If we are going to enable this we should allow
+ # extension checks.
+ avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
return avail_services
@@ -321,12 +318,16 @@
opts = parse_args()
update = opts.update
replace = opts.replace_ext
- global CONF_FILE
- global OUTFILE
+ global CONF_PARSER
+
+ outfile = sys.stdout
if update:
- CONF_FILE = _get_config_file()
+ conf_file = _get_config_file()
if opts.output:
- OUTFILE = open(opts.output, 'w+')
+ outfile = open(opts.output, 'w+')
+ CONF_PARSER = moves.configparser.SafeConfigParser()
+ CONF_PARSER.optionxform = str
+ CONF_PARSER.readfp(conf_file)
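+ # Updates are collected in CONF_PARSER and written back out in
+ # one go once all the checks have run.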
os = clients.ComputeAdminManager(interface='json')
services = check_service_availability(os, update)
results = {}
@@ -341,9 +342,10 @@
verify_nova_api_versions(os, update)
verify_cinder_api_versions(os, update)
display_results(results, update, replace)
- if CONF_FILE:
- CONF_FILE.close()
- OUTFILE.close()
+ if update:
+ conf_file.close()
+ CONF_PARSER.write(outfile)
+ outfile.close()
if __name__ == "__main__":
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 2ab008d..6583475 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -25,7 +25,7 @@
def sudo_cmd_call(cmd):
- args = shlex.split(cmd)
+ args = shlex.split(cmd.encode('utf-8'))
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
@@ -84,7 +84,7 @@
"-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
'file1': file_from,
'dest': dest}
- args = shlex.split(cmd)
+ args = shlex.split(cmd.encode('utf-8'))
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
proc = subprocess.Popen(args, **subprocess_args)
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
new file mode 100644
index 0000000..dc4f049
--- /dev/null
+++ b/tempest/common/cred_provider.py
@@ -0,0 +1,44 @@
+# (c) 2014 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import six
+
+from tempest import config
+from tempest.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class CredentialProvider(object):
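+ # Abstract interface that credential providers such as
+ # IsolatedCreds implement; subclasses are responsible for the
+ # remaining constructor arguments.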
+ def __init__(self, name, tempest_client=True, interface='json',
+ password='pass', network_resources=None):
+ self.name = name
+
+ @abc.abstractmethod
+ def get_primary_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_admin_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_alt_creds(self):
+ return
+
+ @abc.abstractmethod
+ def clear_isolated_creds(self):
+ return
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 208f42f..98b0116 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -16,6 +16,7 @@
from tempest import auth
from tempest import clients
+from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -25,15 +26,16 @@
LOG = logging.getLogger(__name__)
-class IsolatedCreds(object):
+class IsolatedCreds(cred_provider.CredentialProvider):
def __init__(self, name, tempest_client=True, interface='json',
password='pass', network_resources=None):
+ super(IsolatedCreds, self).__init__(name, tempest_client, interface,
+ password, network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
- self.name = name
self.tempest_client = tempest_client
self.interface = interface
self.password = password
diff --git a/tempest/common/utils/data_utils.py b/tempest/common/utils/data_utils.py
index a0a88dd..174e557 100644
--- a/tempest/common/utils/data_utils.py
+++ b/tempest/common/utils/data_utils.py
@@ -34,6 +34,11 @@
return randbits
+def rand_url():
+ randbits = str(random.randint(1, 0x7fffffff))
+ return 'https://url-' + randbits + '.com'
+
+
def rand_int_id(start=0, end=0x7fffffff):
return random.randint(start, end)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index d8474a0..d242c14 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -22,16 +22,6 @@
LOG = logging.getLogger(__name__)
-def _console_dump(client, server_id):
- try:
- resp, output = client.get_console_output(server_id, None)
- LOG.debug("Console Output for Server %s:\n%s" % (
- server_id, output))
- except exceptions.NotFound:
- LOG.debug("Server %s: doesn't have a console" % server_id)
- pass
-
-
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
@@ -81,10 +71,12 @@
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
-
if (server_status == 'ERROR') and raise_on_error:
- _console_dump(client, server_id)
- raise exceptions.BuildErrorException(server_id=server_id)
+ if 'fault' in body:
+ raise exceptions.BuildErrorException(body['fault'],
+ server_id=server_id)
+ else:
+ raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -99,11 +91,9 @@
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
-
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
- _console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
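
With the console dump removed, build failures surface Nova's fault record
(when the server body contains one) directly in the exception. A hedged usage
sketch, where ``servers_client`` and ``server_id`` are placeholders::

    from tempest.common import waiters
    from tempest import exceptions

    try:
        waiters.wait_for_server_status(servers_client, server_id, 'ACTIVE')
    except exceptions.BuildErrorException as e:
        print(e)  # details now include body['fault'] when Nova reported one
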
diff --git a/tempest/config.py b/tempest/config.py
index 008627f..851d8c1 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1089,18 +1089,22 @@
cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
group='compute-admin')
- def __init__(self, parse_conf=True):
+ def __init__(self, parse_conf=True, config_path=None):
"""Initialize a configuration from a conf directory and conf file."""
super(TempestConfigPrivate, self).__init__()
config_files = []
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
- # Environment variables override defaults...
- conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
- self.DEFAULT_CONFIG_DIR)
- conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
+ if config_path:
+ path = config_path
+ else:
+ # Environment variables override defaults...
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
+ self.DEFAULT_CONFIG_DIR)
+ conf_file = os.environ.get('TEMPEST_CONFIG',
+ self.DEFAULT_CONFIG_FILE)
- path = os.path.join(conf_dir, conf_file)
+ path = os.path.join(conf_dir, conf_file)
if not os.path.isfile(path):
path = failsafe_path
@@ -1122,6 +1126,7 @@
class TempestConfigProxy(object):
_config = None
+ _path = None
_extra_log_defaults = [
'keystoneclient.session=INFO',
@@ -1138,9 +1143,12 @@
def __getattr__(self, attr):
if not self._config:
self._fix_log_levels()
- self._config = TempestConfigPrivate()
+ self._config = TempestConfigPrivate(config_path=self._path)
return getattr(self._config, attr)
+ def set_config_path(self, path):
+ self._path = path
+
CONF = TempestConfigProxy()
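
Because the proxy builds ``TempestConfigPrivate`` lazily on first attribute
access, an explicit path only takes effect if it is set before any option is
read. A short sketch (the path is an example)::

    from tempest import config

    config.CONF.set_config_path('/tmp/alt-tempest.conf')
    # The first attribute access below parses /tmp/alt-tempest.conf
    # instead of $TEMPEST_CONFIG_DIR/$TEMPEST_CONFIG.
    print(config.CONF.identity.uri)
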
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 4eb1cea..9d443cc 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -211,3 +211,18 @@
class InvalidStructure(TempestException):
message = "Invalid structure of table with details"
+
+
+class CommandFailed(Exception):
+ def __init__(self, returncode, cmd, output, stderr):
+ super(CommandFailed, self).__init__()
+ self.returncode = returncode
+ self.cmd = cmd
+ self.stdout = output
+ self.stderr = stderr
+
+ def __str__(self):
+ return ("Command '%s' returned non-zero exit status %d.\n"
+ "stdout:\n%s\n"
+ "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
index 835ba99..5a287d6 100644
--- a/tempest/scenario/README.rst
+++ b/tempest/scenario/README.rst
@@ -1,3 +1,5 @@
+.. _scenario_field_guide:
+
Tempest Field Guide to Scenario tests
=====================================
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ca79325..aa24c31 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -53,6 +53,32 @@
LOG_cinder_client.addHandler(log.NullHandler())
+class ScenarioTest(tempest.test.BaseTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ScenarioTest, cls).setUpClass()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(
+ cls.__name__, tempest_client=True,
+ network_resources=cls.network_resources)
+ cls.manager = clients.Manager(
+ credentials=cls.credentials()
+ )
+
+ @classmethod
+ def _get_credentials(cls, get_creds, ctype):
+ if CONF.compute.allow_tenant_isolation:
+ creds = get_creds()
+ else:
+ creds = auth.get_default_credentials(ctype)
+ return creds
+
+ @classmethod
+ def credentials(cls):
+ return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+ 'user')
+
+
class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
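
A hypothetical scenario built on the new base class (``TestMinimalOps`` and
its test body are illustrative only)::

    from tempest.scenario import manager


    class TestMinimalOps(manager.ScenarioTest):

        def test_list_servers(self):
            # cls.manager was created in setUpClass from isolated creds
            # when CONF.compute.allow_tenant_isolation is true, otherwise
            # from the default credentials.
            resp, body = self.manager.servers_client.list_servers()
            self.assertEqual(200, resp.status)
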
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 6418a73..4fcc70a 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -24,7 +24,7 @@
CONF = config.CONF
-class TestDashboardBasicOps(manager.OfficialClientTest):
+class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 800b3b0..8191984 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -141,7 +141,7 @@
server_or_ip=ip,
private_key=private_key)
- # Write a backend's responce into a file
+ # Write a backend's response into a file
resp = """echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n""" \
"""Connection: close\r\nContent-Type: text/html; """ \
"""charset=UTF-8\r\n\r\n%s"; cat >/dev/null"""
diff --git a/tempest/services/__init__.py b/tempest/services/__init__.py
index e7bec60..e69de29 100644
--- a/tempest/services/__init__.py
+++ b/tempest/services/__init__.py
@@ -1,37 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Base Service class, which acts as a descriptor for an OpenStack service
-in the test environment
-"""
-
-
-class Service(object):
-
- def __init__(self, config):
- """
- Initializes the service.
-
- :param config: `tempest.config.Config` object
- """
- self.config = config
-
- def get_client(self):
- """
- Returns a client object that may be used to query
- the service API.
- """
- raise NotImplementedError
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 69d2f35..80bb711 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -164,6 +164,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -473,6 +474,7 @@
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_instance_action, resp, body)
return resp, body['instanceAction']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index d933998..a5b31d3 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -166,6 +166,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_servers_detail, resp, body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
@@ -470,6 +471,7 @@
resp, body = self.get("servers/%s/os-server-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
+ self.validate_response(schema.get_server_action, resp, body)
return resp, body['server_action']
def force_delete_server(self, server_id, **kwargs):
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c2c7fd1..1fe0cf1 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -25,29 +25,42 @@
super(DataProcessingClient, self).__init__(auth_provider)
self.service = CONF.data_processing.catalog_type
- @classmethod
- def _request_and_parse(cls, req_fun, uri, res_name, *args, **kwargs):
- """Make a request using specified req_fun and parse response.
+ def _request_and_check_resp(self, request_func, uri, resp_status):
+ """Make a request using specified request_func and check response
+ status code.
+
+ It returns pair: resp and response body.
+ """
+ resp, body = request_func(uri)
+ self.expected_success(resp_status, resp.status)
+ return resp, body
+
+ def _request_check_and_parse_resp(self, request_func, uri, resp_status,
+ resource_name, *args, **kwargs):
+ """Make a request using specified request_func, check response status
+ code and parse response body.
It returns pair: resp and parsed resource(s) body.
"""
- resp, body = req_fun(uri, headers={
- 'Content-Type': 'application/json'
- }, *args, **kwargs)
+ headers = {'Content-Type': 'application/json'}
+ resp, body = request_func(uri, headers=headers, *args, **kwargs)
+ self.expected_success(resp_status, resp.status)
body = json.loads(body)
- return resp, body[res_name]
+ return resp, body[resource_name]
def list_node_group_templates(self):
"""List all node group templates for a user."""
uri = 'node-group-templates'
- return self._request_and_parse(self.get, uri, 'node_group_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_templates')
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
uri = 'node-group-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'node_group_template')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
node_processes, flavor_id,
@@ -67,20 +80,22 @@
'flavor_id': flavor_id,
'node_configs': node_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'node_group_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'node_group_template',
+ body=json.dumps(body))
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
uri = 'node-group-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_plugins(self):
"""List all enabled plugins."""
uri = 'plugins'
- return self._request_and_parse(self.get, uri, 'plugins')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'plugins')
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
@@ -88,19 +103,21 @@
uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
- return self._request_and_parse(self.get, uri, 'plugin')
+ return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
def list_cluster_templates(self):
"""List all cluster templates for a user."""
uri = 'cluster-templates'
- return self._request_and_parse(self.get, uri, 'cluster_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'cluster_templates')
def get_cluster_template(self, tmpl_id):
"""Returns the details of a single cluster template."""
uri = 'cluster-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'cluster_template')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'cluster_template')
def create_cluster_template(self, name, plugin_name, hadoop_version,
node_groups, cluster_configs=None,
@@ -119,26 +136,29 @@
'node_groups': node_groups,
'cluster_configs': cluster_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'cluster_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'cluster_template',
+ body=json.dumps(body))
def delete_cluster_template(self, tmpl_id):
"""Deletes the specified cluster template by id."""
uri = 'cluster-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_data_sources(self):
"""List all data sources for a user."""
uri = 'data-sources'
- return self._request_and_parse(self.get, uri, 'data_sources')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_sources')
def get_data_source(self, source_id):
"""Returns the details of a single data source."""
uri = 'data-sources/%s' % source_id
- return self._request_and_parse(self.get, uri, 'data_source')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_source')
def create_data_source(self, name, data_source_type, url, **kwargs):
"""Creates data source with specified params.
@@ -153,57 +173,62 @@
'type': data_source_type,
'url': url
})
- return self._request_and_parse(self.post, uri, 'data_source',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'data_source',
+ body=json.dumps(body))
def delete_data_source(self, source_id):
"""Deletes the specified data source by id."""
uri = 'data-sources/%s' % source_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_job_binary_internals(self):
"""List all job binary internals for a user."""
uri = 'job-binary-internals'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary_internal(self, job_binary_id):
"""Returns the details of a single job binary internal."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary_internal')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'job_binary_internal')
def create_job_binary_internal(self, name, data):
"""Creates job binary internal with specified params."""
uri = 'job-binary-internals/%s' % name
- return self._request_and_parse(self.put, uri, 'job_binary_internal',
- data)
+ return self._request_check_and_parse_resp(self.put, uri, 202,
+ 'job_binary_internal', data)
def delete_job_binary_internal(self, job_binary_id):
"""Deletes the specified job binary internal by id."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_internal_data(self, job_binary_id):
"""Returns data of a single job binary internal."""
uri = 'job-binary-internals/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
def list_job_binaries(self):
"""List all job binaries for a user."""
uri = 'job-binaries'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary(self, job_binary_id):
"""Returns the details of a single job binary."""
uri = 'job-binaries/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'job_binary')
def create_job_binary(self, name, url, extra=None, **kwargs):
"""Creates job binary with specified params.
@@ -218,17 +243,18 @@
'url': url,
'extra': extra or dict(),
})
- return self._request_and_parse(self.post, uri, 'job_binary',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'job_binary',
+ body=json.dumps(body))
def delete_job_binary(self, job_binary_id):
"""Deletes the specified job binary by id."""
uri = 'job-binaries/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_data(self, job_binary_id):
"""Returns data of a single job binary."""
uri = 'job-binaries/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
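
For callers nothing changes except that unexpected status codes now fail
fast. A hedged sketch, with ``client`` construction omitted::

    # GET endpoints assert 200 before parsing out the named resource
    resp, plugins = client.list_plugins()

    # DELETE endpoints assert 204 and return the raw (resp, body) pair
    resp, body = client.delete_data_source(source_id)
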
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 8c72dfa..0188c2a 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -14,6 +14,7 @@
# under the License.
import json
+import urllib
from tempest.common import rest_client
from tempest import config
@@ -76,6 +77,17 @@
body = json.loads(body)
return resp, body['user']
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = {
+ 'password': password,
+ 'original_password': original_password
+ }
+ update_user = json.dumps({'user': update_user})
+ resp, _ = self.post('users/%s/password' % user_id, update_user)
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
@@ -83,9 +95,12 @@
body = json.loads(body)
return resp, body['projects']
- def get_users(self):
+ def get_users(self, params=None):
"""Get the list of users."""
- resp, body = self.get("users")
+ url = 'users'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['users']
@@ -498,10 +513,7 @@
"""HEAD Check if role is delegated by a trust."""
resp, body = self.head("OS-TRUST/trusts/%s/roles/%s"
% (trust_id, role_id))
- # This code needs to change to 200 when the keystone changes
- # for bug 1334368 merge and check_trust_roles test is
- # unskipped
- self.expected_success(204, resp.status)
+ self.expected_success(200, resp.status)
return resp, body
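
A hedged sketch of the two new v3 identity calls; ``client``, ``user_id`` and
the password values are placeholders::

    # POSTs to users/<id>/password and asserts a 204 response
    resp = client.update_user_password(user_id, 'NewSecret1', 'OldSecret1')

    # get_users now accepts filters, urlencoded onto the query string
    resp, users = client.get_users(params={'name': 'alice'})
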
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 242b032..f3e084e 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -14,6 +14,7 @@
# under the License.
import json
+import urllib
from lxml import etree
@@ -76,6 +77,14 @@
array.append(common.xml_to_json(child))
return array
+ def _parse_users(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "user":
+ array.append(common.xml_to_json(child))
+ return array
+
def _parse_array(self, node):
array = []
for child in node.getchildren():
@@ -130,6 +139,17 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = common.Element("user",
+ xmlns=XMLNS,
+ password=password,
+ original_password=original_password)
+ resp, _ = self.post('users/%s/password' % user_id,
+ str(common.Document(update_user)))
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
@@ -137,11 +157,14 @@
body = self._parse_projects(etree.fromstring(body))
return resp, body
- def get_users(self):
+ def get_users(self, params=None):
"""Get the list of users."""
- resp, body = self.get("users")
+ url = 'users'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
self.expected_success(200, resp.status)
- body = self._parse_array(etree.fromstring(body))
+ body = self._parse_users(etree.fromstring(body))
return resp, body
def get_user(self, user_id):
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index c459f28..46b0ec4 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -181,7 +181,12 @@
fail_regexp = re.compile(failure_pattern)
while True:
- resp, body = self.get_stack(stack_identifier)
+ try:
+ resp, body = self.get_stack(stack_identifier)
+ except exceptions.NotFound:
+ if status == 'DELETE_COMPLETE':
+ return
+ raise
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
@@ -259,3 +263,140 @@
'parameters': parameters,
}
return self._validate_template(post_body)
+
+ def create_software_config(self, name=None, config=None, group=None,
+ inputs=None, outputs=None, options=None):
+ headers, body = self._prep_software_config_create(
+ name, config, group, inputs, outputs, options)
+
+ url = 'software_configs'
+ resp, body = self.post(url, headers=headers, body=body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_config(self, conf_id):
+ """Returns a software configuration resource."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def delete_software_config(self, conf_id):
+ """Deletes a specific software configuration."""
+ url = 'software_configs/%s' % str(conf_id)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp.status)
+
+ def create_software_deploy(self, server_id=None, config_id=None,
+ action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ None, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments'
+ resp, body = self.post(url, headers=headers, body=body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def update_software_deploy(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None, signal_transport=None):
+ """Creates or updates a software deployment."""
+ headers, body = self._prep_software_deploy_update(
+ deploy_id, server_id, config_id, action, status, input_values,
+ output_values, status_reason, signal_transport)
+
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.put(url, headers=headers, body=body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy_list(self):
+ """Returns a list of all deployments."""
+ url = 'software_deployments'
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy(self, deploy_id):
+ """Returns a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def get_software_deploy_meta(self, server_id):
+ """Return a config metadata for a specific server."""
+ url = 'software_deployments/metadata/%s' % server_id
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return body
+
+ def delete_software_deploy(self, deploy_id):
+ """Deletes a specific software deployment."""
+ url = 'software_deployments/%s' % str(deploy_id)
+ resp, _ = self.delete(url)
+ self.expected_success(204, resp.status)
+
+ def _prep_software_config_create(self, name=None, conf=None, group=None,
+ inputs=None, outputs=None, options=None):
+ """Prepares a software configuration body."""
+ post_body = {}
+ if name is not None:
+ post_body["name"] = name
+ if conf is not None:
+ post_body["config"] = conf
+ if group is not None:
+ post_body["group"] = group
+ if inputs is not None:
+ post_body["inputs"] = inputs
+ if outputs is not None:
+ post_body["outputs"] = outputs
+ if options is not None:
+ post_body["options"] = options
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
+
+ def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
+ config_id=None, action=None, status=None,
+ input_values=None, output_values=None,
+ status_reason=None,
+ signal_transport=None):
+ """Prepares a deployment create or update (if an id was given)."""
+ post_body = {}
+
+ if deploy_id is not None:
+ post_body["id"] = deploy_id
+ if server_id is not None:
+ post_body["server_id"] = server_id
+ if config_id is not None:
+ post_body["config_id"] = config_id
+ if action is not None:
+ post_body["action"] = action
+ if status is not None:
+ post_body["status"] = status
+ if input_values is not None:
+ post_body["input_values"] = input_values
+ if output_values is not None:
+ post_body["output_values"] = output_values
+ if status_reason is not None:
+ post_body["status_reason"] = status_reason
+ if signal_transport is not None:
+ post_body["signal_transport"] = signal_transport
+ body = json.dumps(post_body)
+
+ headers = self.get_headers()
+ return headers, body
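
A hedged end-to-end sketch of the new software-config calls; the ``client``
object and the server id are placeholders, and the body keys assume Heat's
``software_config``/``software_deployment`` response envelopes::

    body = client.create_software_config(
        name='install-nginx', group='script',
        config='#!/bin/sh\napt-get install -y nginx')
    conf_id = body['software_config']['id']

    deploy = client.create_software_deploy(
        server_id=server_id, config_id=conf_id,
        action='CREATE', status='IN_PROGRESS')
    deploy_id = deploy['software_deployment']['id']

    client.update_software_deploy(
        deploy_id=deploy_id, server_id=server_id, config_id=conf_id,
        action='CREATE', status='COMPLETE', status_reason='done')
    client.delete_software_deploy(deploy_id)
    client.delete_software_config(conf_id)
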
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 0a63679..4f1f56c 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -1,3 +1,5 @@
+.. _stress_field_guide:
+
Tempest Field Guide to Stress Tests
===================================
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
index 33d321f..e54d4c0 100644
--- a/tempest/tests/README.rst
+++ b/tempest/tests/README.rst
@@ -1,3 +1,5 @@
+.. _unit_tests_field_guide:
+
Tempest Field Guide to Unit tests
=================================
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
new file mode 100644
index 0000000..36a4fc8
--- /dev/null
+++ b/tempest/tests/cli/test_command_failed.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestOutputParser(base.TestCase):
+
+ def test_command_failed_exception(self):
+ returncode = 1
+ cmd = "foo"
+ stdout = "output"
+ stderr = "error"
+ try:
+ raise exceptions.CommandFailed(returncode, cmd, stdout, stderr)
+ except exceptions.CommandFailed as e:
+ self.assertIn(str(returncode), str(e))
+ self.assertIn(cmd, str(e))
+ self.assertIn(stdout, str(e))
+ self.assertIn(stderr, str(e))
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4bed0c2..536cbcf 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -58,6 +58,6 @@
class FakePrivate(config.TempestConfigPrivate):
- def __init__(self):
+ def __init__(self, parse_conf=True, config_path=None):
cfg.CONF([], default_config_files=[])
self._set_attrs()
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index c77faca..a7af619 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -102,7 +102,7 @@
}
}
- unkown_type_schema = {
+ unknown_type_schema = {
"type": "not_defined"
}
@@ -131,7 +131,7 @@
def test_generate_with_unknown_type(self):
self.assertRaises(TypeError, self.generator.generate,
- self.unkown_type_schema)
+ self.unknown_type_schema)
class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index 5a334c5..3dc2199 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -32,7 +32,7 @@
cmd = ' '.join([cmd, param])
LOG.info("running: '%s'" % cmd)
cmd_str = cmd
- cmd = shlex.split(cmd)
+ cmd = shlex.split(cmd.encode('utf-8'))
result = ''
result_err = ''
try:
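
The ``encode('utf-8')`` works around ``shlex`` on older Python 2
interpreters, where splitting a unicode string can return NUL-padded tokens;
converting to a byte string first avoids that::

    import shlex

    cmd = u"tempest-stress -t etc/sample-unit-test.json"
    args = shlex.split(cmd.encode('utf-8'))  # safe on Python 2
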
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
index 53cb54b..b0bfdf7 100644
--- a/tempest/thirdparty/README.rst
+++ b/tempest/thirdparty/README.rst
@@ -1,3 +1,5 @@
+.. _third_party_field_guide:
+
Tempest Field Guide to Third Party API tests
============================================
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 7713931..2c68d6b 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -177,7 +177,10 @@
instance.remove_tag('key1', value='value1')
tags = self.ec2_client.get_all_tags()
- self.assertEqual(len(tags), 0, str(tags))
+
+ # NOTE: Volume attach and detach cause metadata (tags) to be created
+ # for the volume, so exclude them when asserting.
+ self.assertNotIn('key1', tags)
for instance in reservation.instances:
instance.stop()
diff --git a/test-requirements.txt b/test-requirements.txt
index 13ef291..cd8154b 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,5 @@
hacking>=0.9.2,<0.10
# needed for doc build
-docutils==0.9.1
sphinx>=1.1.2,!=1.2.0,<1.3
python-subunit>=0.0.18
oslosphinx
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
index c6f8eab..8ad59bb 100755
--- a/tools/subunit-trace.py
+++ b/tools/subunit-trace.py
@@ -263,7 +263,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
- 'debug infomation in realtime')
+ 'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
'information after the stream is processed')