Merge "Add links to the field guide index"
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index cccaf13..f4d010e 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -127,6 +127,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize is not available.')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index ab0e83a..cfb5a3d 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -75,16 +75,16 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_node_group_template(name, plugin_name,
- hadoop_version,
- node_processes,
- flavor_id,
- node_configs,
- **kwargs)
+ _, resp_body = cls.client.create_node_group_template(name, plugin_name,
+ hadoop_version,
+ node_processes,
+ flavor_id,
+ node_configs,
+ **kwargs)
# store id of created node group template
- cls._node_group_templates.append(body['id'])
+ cls._node_group_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_cluster_template(cls, name, plugin_name, hadoop_version,
@@ -95,15 +95,15 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_cluster_template(name, plugin_name,
- hadoop_version,
- node_groups,
- cluster_configs,
- **kwargs)
+ _, resp_body = cls.client.create_cluster_template(name, plugin_name,
+ hadoop_version,
+ node_groups,
+ cluster_configs,
+ **kwargs)
# store id of created cluster template
- cls._cluster_templates.append(body['id'])
+ cls._cluster_templates.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_data_source(cls, name, type, url, **kwargs):
@@ -113,11 +113,11 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+ _, resp_body = cls.client.create_data_source(name, type, url, **kwargs)
# store id of created data source
- cls._data_sources.append(body['id'])
+ cls._data_sources.append(resp_body['id'])
- return resp, body
+ return resp_body
@classmethod
def create_job_binary_internal(cls, name, data):
@@ -126,11 +126,11 @@
It returns created object. All resources created in this method will
be automatically removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary_internal(name, data)
+ _, resp_body = cls.client.create_job_binary_internal(name, data)
# store id of created job binary internal
- cls._job_binary_internals.append(body['id'])
+ cls._job_binary_internals.append(resp_body['id'])
- return resp, body
+ return resp_body
def create_job_binary(cls, name, url, extra=None, **kwargs):
"""Creates watched job binary with specified params.
@@ -139,8 +139,8 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
- resp, body = cls.client.create_job_binary(name, url, extra, **kwargs)
+ _, resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
# store id of created job binary
- cls._job_binaries.append(body['id'])
+ cls._job_binaries.append(resp_body['id'])
- return resp, body
+ return resp_body
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
index ad9ed2a..ff67c1c 100644
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -39,7 +39,7 @@
}
}
}
- resp_body = cls.create_node_group_template(**node_group_template)[1]
+ resp_body = cls.create_node_group_template(**node_group_template)
node_group_template_id = resp_body['id']
cls.full_cluster_template = {
@@ -95,23 +95,22 @@
def _create_cluster_template(self, template_name=None):
"""Creates Cluster Template with optional name specified.
- It creates template and ensures response status, template name and
- response body. Returns id and name of created template.
+ It creates template, ensures template name and response body.
+ Returns id and name of created template.
"""
if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-cluster-template')
# create cluster template
- resp, body = self.create_cluster_template(template_name,
- **self.full_cluster_template)
+ resp_body = self.create_cluster_template(template_name,
+ **self.full_cluster_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.cluster_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.cluster_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_cluster_template_create(self):
@@ -122,8 +121,7 @@
template_info = self._create_cluster_template()
# check for cluster template in list
- resp, templates = self.client.list_cluster_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_cluster_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -133,16 +131,14 @@
template_id, template_name = self._create_cluster_template()
# check cluster template fetch by id
- resp, template = self.client.get_cluster_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_cluster_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.cluster_template, template)
@test.attr(type='smoke')
def test_cluster_template_delete(self):
- template_id = self._create_cluster_template()[0]
+ template_id, _ = self._create_cluster_template()
# delete the cluster template by id
- resp = self.client.delete_cluster_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_cluster_template(template_id)
# TODO(ylobankov): check that cluster template is really deleted
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
index 345153b..aae56c4 100644
--- a/tempest/api/data_processing/test_data_sources.py
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -48,65 +48,59 @@
def _create_data_source(self, source_body, source_name=None):
"""Creates Data Source with optional name specified.
- It creates a link to input-source file (it may not exist) and ensures
- response status and source name. Returns id and name of created source.
+ It creates a link to input-source file (it may not exist), ensures
+ source name and response body. Returns id and name of created source.
"""
if not source_name:
# generate random name if it's not specified
source_name = data_utils.rand_name('sahara-data-source')
# create data source
- resp, body = self.create_data_source(source_name, **source_body)
+ resp_body = self.create_data_source(source_name, **source_body)
# ensure that source created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(source_name, body['name'])
+ self.assertEqual(source_name, resp_body['name'])
if source_body['type'] == 'swift':
source_body = self.swift_data_source
- self.assertDictContainsSubset(source_body, body)
+ self.assertDictContainsSubset(source_body, resp_body)
- return body['id'], source_name
+ return resp_body['id'], source_name
def _list_data_sources(self, source_info):
# check for data source in list
- resp, sources = self.client.list_data_sources()
- self.assertEqual(200, resp.status)
+ _, sources = self.client.list_data_sources()
sources_info = [(source['id'], source['name']) for source in sources]
self.assertIn(source_info, sources_info)
def _get_data_source(self, source_id, source_name, source_body):
# check data source fetch by id
- resp, source = self.client.get_data_source(source_id)
- self.assertEqual(200, resp.status)
+ _, source = self.client.get_data_source(source_id)
self.assertEqual(source_name, source['name'])
self.assertDictContainsSubset(source_body, source)
- def _delete_data_source(self, source_id):
- # delete the data source by id
- resp = self.client.delete_data_source(source_id)[0]
- self.assertEqual(204, resp.status)
-
@test.attr(type='smoke')
def test_swift_data_source_create(self):
self._create_data_source(self.swift_data_source_with_creds)
@test.attr(type='smoke')
def test_swift_data_source_list(self):
- source_info = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_info = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._list_data_sources(source_info)
@test.attr(type='smoke')
def test_swift_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.swift_data_source_with_creds)
+ source_id, source_name = (
+ self._create_data_source(self.swift_data_source_with_creds))
self._get_data_source(source_id, source_name, self.swift_data_source)
@test.attr(type='smoke')
def test_swift_data_source_delete(self):
- source_id = self._create_data_source(
- self.swift_data_source_with_creds)[0]
- self._delete_data_source(source_id)
+ source_id, _ = (
+ self._create_data_source(self.swift_data_source_with_creds))
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_local_hdfs_data_source_create(self):
@@ -119,15 +113,17 @@
@test.attr(type='smoke')
def test_local_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.local_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.local_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.local_hdfs_data_source)
@test.attr(type='smoke')
def test_local_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.local_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.local_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
@test.attr(type='smoke')
def test_external_hdfs_data_source_create(self):
@@ -140,12 +136,14 @@
@test.attr(type='smoke')
def test_external_hdfs_data_source_get(self):
- source_id, source_name = self._create_data_source(
- self.external_hdfs_data_source)
+ source_id, source_name = (
+ self._create_data_source(self.external_hdfs_data_source))
self._get_data_source(
source_id, source_name, self.external_hdfs_data_source)
@test.attr(type='smoke')
def test_external_hdfs_data_source_delete(self):
- source_id = self._create_data_source(self.external_hdfs_data_source)[0]
- self._delete_data_source(source_id)
+ source_id, _ = self._create_data_source(self.external_hdfs_data_source)
+
+ # delete the data source by id
+ self.client.delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
index 689c1fe..15ee145 100644
--- a/tempest/api/data_processing/test_job_binaries.py
+++ b/tempest/api/data_processing/test_job_binaries.py
@@ -40,8 +40,8 @@
name = data_utils.rand_name('sahara-internal-job-binary')
cls.job_binary_data = 'Some script may be data'
- job_binary_internal = cls.create_job_binary_internal(
- name, cls.job_binary_data)[1]
+ job_binary_internal = (
+ cls.create_job_binary_internal(name, cls.job_binary_data))
cls.internal_db_job_binary = {
'url': 'internal-db://%s' % job_binary_internal['id'],
'description': 'Test job binary',
@@ -50,26 +50,25 @@
def _create_job_binary(self, binary_body, binary_name=None):
"""Creates Job Binary with optional name specified.
- It creates a link to data (jar, pig files, etc.) and ensures response
- status, job binary name and response body. Returns id and name of
- created job binary. Data may not exist when using Swift
- as data storage. In other cases data must exist in storage.
+ It creates a link to data (jar, pig files, etc.), ensures job binary
+ name and response body. Returns id and name of created job binary.
+ Data may not exist when using Swift as data storage.
+ In other cases data must exist in storage.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary')
# create job binary
- resp, body = self.create_job_binary(binary_name, **binary_body)
+ resp_body = self.create_job_binary(binary_name, **binary_body)
# ensure that binary created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
if 'swift' in binary_body['url']:
binary_body = self.swift_job_binary
- self.assertDictContainsSubset(binary_body, body)
+ self.assertDictContainsSubset(binary_body, resp_body)
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_swift_job_binary_create(self):
@@ -80,30 +79,27 @@
binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_swift_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.swift_job_binary_with_extra)
+ binary_id, binary_name = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.swift_job_binary, binary)
@test.attr(type='smoke')
def test_swift_job_binary_delete(self):
- binary_id = self._create_job_binary(
- self.swift_job_binary_with_extra)[0]
+ binary_id, _ = (
+ self._create_job_binary(self.swift_job_binary_with_extra))
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_internal_db_job_binary_create(self):
@@ -114,35 +110,31 @@
binary_info = self._create_job_binary(self.internal_db_job_binary)
# check for job binary in list
- resp, binaries = self.client.list_job_binaries()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binaries()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@test.attr(type='smoke')
def test_internal_db_job_binary_get(self):
- binary_id, binary_name = self._create_job_binary(
- self.internal_db_job_binary)
+ binary_id, binary_name = (
+ self._create_job_binary(self.internal_db_job_binary))
# check job binary fetch by id
- resp, binary = self.client.get_job_binary(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary(binary_id)
self.assertEqual(binary_name, binary['name'])
self.assertDictContainsSubset(self.internal_db_job_binary, binary)
@test.attr(type='smoke')
def test_internal_db_job_binary_delete(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# delete the job binary by id
- resp = self.client.delete_job_binary(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary(binary_id)
@test.attr(type='smoke')
def test_job_binary_get_data(self):
- binary_id = self._create_job_binary(self.internal_db_job_binary)[0]
+ binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
# get data of job binary by id
- resp, data = self.client.get_job_binary_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_data(binary_id)
self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
index 6d59177..45e1140 100644
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -29,23 +29,22 @@
def _create_job_binary_internal(self, binary_name=None):
"""Creates Job Binary Internal with optional name specified.
- It puts data into Sahara database and ensures response status and
- job binary internal name. Returns id and name of created job binary
- internal.
+ It puts data into Sahara database and ensures job binary internal name.
+ Returns id and name of created job binary internal.
"""
if not binary_name:
# generate random name if it's not specified
binary_name = data_utils.rand_name('sahara-job-binary-internal')
# create job binary internal
- resp, body = self.create_job_binary_internal(
- binary_name, self.job_binary_internal_data)
+ resp_body = (
+ self.create_job_binary_internal(binary_name,
+ self.job_binary_internal_data))
# ensure that job binary internal created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(binary_name, body['name'])
+ self.assertEqual(binary_name, resp_body['name'])
- return body['id'], binary_name
+ return resp_body['id'], binary_name
@test.attr(type='smoke')
def test_job_binary_internal_create(self):
@@ -56,8 +55,7 @@
binary_info = self._create_job_binary_internal()
# check for job binary internal in list
- resp, binaries = self.client.list_job_binary_internals()
- self.assertEqual(200, resp.status)
+ _, binaries = self.client.list_job_binary_internals()
binaries_info = [(binary['id'], binary['name']) for binary in binaries]
self.assertIn(binary_info, binaries_info)
@@ -66,23 +64,20 @@
binary_id, binary_name = self._create_job_binary_internal()
# check job binary internal fetch by id
- resp, binary = self.client.get_job_binary_internal(binary_id)
- self.assertEqual(200, resp.status)
+ _, binary = self.client.get_job_binary_internal(binary_id)
self.assertEqual(binary_name, binary['name'])
@test.attr(type='smoke')
def test_job_binary_internal_delete(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# delete the job binary internal by id
- resp = self.client.delete_job_binary_internal(binary_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_job_binary_internal(binary_id)
@test.attr(type='smoke')
def test_job_binary_internal_get_data(self):
- binary_id = self._create_job_binary_internal()[0]
+ binary_id, _ = self._create_job_binary_internal()
# get data of job binary internal by id
- resp, data = self.client.get_job_binary_internal_data(binary_id)
- self.assertEqual(200, resp.status)
+ _, data = self.client.get_job_binary_internal_data(binary_id)
self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index 04f98b4..c2c0075 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -43,7 +43,7 @@
def _create_node_group_template(self, template_name=None):
"""Creates Node Group Template with optional name specified.
- It creates template and ensures response status and template name.
+ It creates template, ensures template name and response body.
Returns id and name of created template.
"""
if not template_name:
@@ -51,15 +51,14 @@
template_name = data_utils.rand_name('sahara-ng-template')
# create node group template
- resp, body = self.create_node_group_template(
- template_name, **self.node_group_template)
+ resp_body = self.create_node_group_template(template_name,
+ **self.node_group_template)
# ensure that template created successfully
- self.assertEqual(202, resp.status)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.node_group_template, body)
+ self.assertEqual(template_name, resp_body['name'])
+ self.assertDictContainsSubset(self.node_group_template, resp_body)
- return body['id'], template_name
+ return resp_body['id'], template_name
@test.attr(type='smoke')
def test_node_group_template_create(self):
@@ -70,8 +69,7 @@
template_info = self._create_node_group_template()
# check for node group template in list
- resp, templates = self.client.list_node_group_templates()
- self.assertEqual(200, resp.status)
+ _, templates = self.client.list_node_group_templates()
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
@@ -81,15 +79,13 @@
template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
- resp, template = self.client.get_node_group_template(template_id)
- self.assertEqual(200, resp.status)
+ _, template = self.client.get_node_group_template(template_id)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.node_group_template, template)
@test.attr(type='smoke')
def test_node_group_template_delete(self):
- template_id = self._create_node_group_template()[0]
+ template_id, _ = self._create_node_group_template()
# delete the node group template by id
- resp = self.client.delete_node_group_template(template_id)[0]
- self.assertEqual(204, resp.status)
+ self.client.delete_node_group_template(template_id)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index d643f23..9fd7a17 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -20,10 +20,9 @@
def _list_all_plugin_names(self):
"""Returns all enabled plugin names.
- It ensures response status and main plugins availability.
+ It ensures main plugins availability.
"""
- resp, plugins = self.client.list_plugins()
- self.assertEqual(200, resp.status)
+ _, plugins = self.client.list_plugins()
plugins_names = [plugin['name'] for plugin in plugins]
self.assertIn('vanilla', plugins_names)
self.assertIn('hdp', plugins_names)
@@ -37,14 +36,12 @@
@test.attr(type='smoke')
def test_plugin_get(self):
for plugin_name in self._list_all_plugin_names():
- resp, plugin = self.client.get_plugin(plugin_name)
- self.assertEqual(200, resp.status)
+ _, plugin = self.client.get_plugin(plugin_name)
self.assertEqual(plugin_name, plugin['name'])
for plugin_version in plugin['versions']:
- resp, detailed_plugin = self.client.get_plugin(plugin_name,
- plugin_version)
- self.assertEqual(200, resp.status)
+ _, detailed_plugin = self.client.get_plugin(plugin_name,
+ plugin_version)
self.assertEqual(plugin_name, detailed_plugin['name'])
# check that required image tags contains name and version
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 558575e..3c25819 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -65,6 +65,28 @@
self.assertEqual('false', str(new_user_get['enabled']).lower())
@test.attr(type='gate')
+ def test_update_user_password(self):
+ # Creating User to check password updation
+ u_name = data_utils.rand_name('user')
+ original_password = data_utils.rand_name('pass')
+ _, user = self.client.create_user(
+ u_name, password=original_password)
+ # Delete the User at the end all test methods
+ self.addCleanup(self.client.delete_user, user['id'])
+ # Update user with new password
+ new_password = data_utils.rand_name('pass1')
+ self.client.update_user_password(user['id'], new_password,
+ original_password)
+ resp, body = self.token.auth(user['id'], new_password)
+ self.assertEqual(201, resp.status)
+ subject_token = resp['x-subject-token']
+ # Perform GET Token to verify and confirm password is updated
+ _, token_details = self.client.get_token(subject_token)
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user['id'])
+ self.assertEqual(token_details['user']['name'], u_name)
+
+ @test.attr(type='gate')
def test_list_user_projects(self):
# List the projects that a user has access upon
assigned_project_ids = list()
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ae777eb..4226815 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -86,7 +86,8 @@
# Verifying deletion
_, images = self.client.image_list()
- self.assertNotIn(image_id, images)
+ images_id = [item['id'] for item in images]
+ self.assertNotIn(image_id, images_id)
@test.attr(type='gate')
def test_update_image(self):
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index d1a8faf..9fa54b1 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -46,8 +46,7 @@
raise cls.skipException(msg)
cls.identity_admin_client = cls.os_adm.identity_client
- @test.attr(type='gate')
- def test_quotas(self):
+ def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
@@ -56,14 +55,15 @@
description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+
# Change quotas for tenant
- new_quotas = {'network': 0, 'security_group': 0}
resp, quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
self.assertEqual('200', resp['status'])
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Confirm our tenant is listed among tenants with non default quotas
resp, non_default_quotas = self.admin_client.list_quotas()
self.assertEqual('200', resp['status'])
@@ -72,12 +72,14 @@
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
+
+ # Confirm from API quotas were changed as requested for tenant
resp, quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
self.assertEqual('200', resp['status'])
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Reset quotas to default and confirm
resp, body = self.admin_client.reset_quotas(tenant_id)
self.assertEqual('204', resp['status'])
@@ -86,49 +88,14 @@
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+ @test.attr(type='gate')
+ def test_quotas(self):
+ new_quotas = {'network': 0, 'security_group': 0}
+ self._check_quotas(new_quotas)
+
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
- # Add a tenant to conduct the test
- test_tenant = data_utils.rand_name('test_tenant_')
- test_description = data_utils.rand_name('desc_')
- _, tenant = self.identity_admin_client.create_tenant(
- name=test_tenant,
- description=test_description)
- tenant_id = tenant['id']
- self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
- # Change lbaas quotas for tenant
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
-
- resp, quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
- self.assertEqual('200', resp['status'])
- self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Confirm our tenant is listed among tenants with non default quotas
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- found = False
- for qs in non_default_quotas['quotas']:
- if qs['tenant_id'] == tenant_id:
- found = True
- self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
- resp, quota_set = self.admin_client.show_quotas(tenant_id)
- quota_set = quota_set['quota']
- self.assertEqual('200', resp['status'])
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Reset quotas to default and confirm
- resp, body = self.admin_client.reset_quotas(tenant_id)
- self.assertEqual('204', resp['status'])
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- for q in non_default_quotas['quotas']:
- self.assertNotEqual(tenant_id, q['tenant_id'])
+ self._check_quotas(new_quotas)
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index bc46901..96e1c50 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -65,7 +65,10 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
@test.attr(type='gate')
def test_files_provider_resource(self):
@@ -90,4 +93,7 @@
# random_string.yaml specifies a length of 10
random_value = self.get_stack_output(stack_identifier, 'random_value')
- self.assertEqual(10, len(random_value))
+ random_string_template = self.load_template('random_string')
+ expected_length = random_string_template['parameters'][
+ 'random_length']['default']
+ self.assertEqual(expected_length, len(random_value))
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 336fc99..e22a08b 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -23,6 +23,8 @@
class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
_tpl_type = 'yaml'
+ _resource = 'resources'
+ _type = 'type'
@classmethod
def setUpClass(cls):
@@ -49,8 +51,15 @@
@test.attr(type='slow')
def test_created_resources(self):
"""Verifies created keypair resource."""
- resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
- ('KeyPairDontSavePrivate', 'OS::Nova::KeyPair')]
+
+ nova_keypair_template = self.load_template('nova_keypair',
+ ext=self._tpl_type)
+ resources = [('KeyPairSavePrivate',
+ nova_keypair_template[self._resource][
+ 'KeyPairSavePrivate'][self._type]),
+ ('KeyPairDontSavePrivate',
+ nova_keypair_template[self._resource][
+ 'KeyPairDontSavePrivate'][self._type])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
@@ -85,3 +94,5 @@
class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
_tpl_type = 'json'
+ _resource = 'Resources'
+ _type = 'Type'
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 2ba2811..adab8c3 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -49,8 +49,11 @@
def test_created_resources(self):
"""Created stack should be in the list of existing stacks."""
- resources = [('SwiftContainer', 'OS::Swift::Container'),
- ('SwiftContainerWebsite', 'OS::Swift::Container')]
+ swift_basic_template = self.load_template('swift_basic')
+ resources = [('SwiftContainer', swift_basic_template['resources'][
+ 'SwiftContainer']['type']),
+ ('SwiftContainerWebsite', swift_basic_template[
+ 'resources']['SwiftContainerWebsite']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name)
self.assertIsInstance(resource, dict)
@@ -84,10 +87,9 @@
self.assertIn(h, headers)
def test_metadata(self):
- metadatas = {
- "web-index": "index.html",
- "web-error": "error.html"
- }
+ swift_basic_template = self.load_template('swift_basic')
+ metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
+ 'properties']['X-Container-Meta']
swcont_website = self.test_resources.get(
'SwiftContainerWebsite')['physical_resource_id']
headers, _ = self.container_client.list_container_metadata(
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 2b422fd..b5b2bb1 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -34,19 +34,27 @@
cls.telemetry_client = os.telemetry_client
cls.servers_client = os.servers_client
cls.flavors_client = os.flavors_client
+ cls.image_client = os.image_client
+ cls.image_client_v2 = os.image_client_v2
cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
'disk.ephemeral.size']
+
+ cls.glance_notifications = ['image.update', 'image.upload',
+ 'image.delete']
+
+ cls.glance_v2_notifications = ['image.download', 'image.serve']
+
cls.server_ids = []
cls.alarm_ids = []
+ cls.image_ids = []
@classmethod
def create_alarm(cls, **kwargs):
resp, body = cls.telemetry_client.create_alarm(
name=data_utils.rand_name('telemetry_alarm'),
type='threshold', **kwargs)
- if resp['status'] == '201':
- cls.alarm_ids.append(body['alarm_id'])
+ cls.alarm_ids.append(body['alarm_id'])
return resp, body
@classmethod
@@ -55,8 +63,15 @@
data_utils.rand_name('ceilometer-instance'),
CONF.compute.image_ref, CONF.compute.flavor_ref,
wait_until='ACTIVE')
- if resp['status'] == '202':
- cls.server_ids.append(body['id'])
+ cls.server_ids.append(body['id'])
+ return resp, body
+
+ @classmethod
+ def create_image(cls, client):
+ resp, body = client.create_image(
+ data_utils.rand_name('image'), container_format='bare',
+ disk_format='raw', visibility='private')
+ cls.image_ids.append(body['id'])
return resp, body
@staticmethod
@@ -71,6 +86,7 @@
def tearDownClass(cls):
cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
+ cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
cls.clear_isolated_creds()
super(BaseTelemetryTest, cls).tearDownClass()
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index f401b9b..2a170c7 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -43,6 +43,36 @@
for metric in self.nova_notifications:
self.await_samples(metric, query)
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v1,
+ "Glance api v1 is disabled")
+ def test_check_glance_v1_notifications(self):
+ _, body = self.create_image(self.image_client)
+ self.image_client.update_image(body['id'], data='data')
+
+ query = 'resource', 'eq', body['id']
+
+ self.image_client.delete_image(body['id'])
+
+ for metric in self.glance_notifications:
+ self.await_samples(metric, query)
+
+ @test.attr(type="smoke")
+ @test.services("image")
+ @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
+ "Glance api v2 is disabled")
+ def test_check_glance_v2_notifications(self):
+ _, body = self.create_image(self.image_client_v2)
+
+ self.image_client_v2.store_image(body['id'], "file")
+ self.image_client_v2.get_image_file(body['id'])
+
+ query = 'resource', 'eq', body['id']
+
+ for metric in self.glance_v2_notifications:
+ self.await_samples(metric, query)
+
class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
_interface = 'xml'
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e79d23c..d451517 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -36,42 +36,55 @@
cls.volume_client = cls.os_adm.volumes_client
cls.volume_type_id_list = []
- cls.volume_id_list = []
+ cls.volume_id_list_with_prefix = []
+ cls.volume_id_list_without_prefix = []
- # Volume/Type creation (uses backend1_name)
- type1_name = data_utils.rand_name('Type-')
- vol1_name = data_utils.rand_name('Volume-')
- extra_specs1 = {"volume_backend_name": cls.backend1_name}
- resp, cls.type1 = cls.client.create_volume_type(
- type1_name, extra_specs=extra_specs1)
- cls.volume_type_id_list.append(cls.type1['id'])
-
- resp, cls.volume1 = cls.volume_client.create_volume(
- size=1, display_name=vol1_name, volume_type=type1_name)
- cls.volume_id_list.append(cls.volume1['id'])
- cls.volume_client.wait_for_volume_status(cls.volume1['id'],
- 'available')
+ # Volume/Type creation (uses volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend1_name, True)
if cls.backend1_name != cls.backend2_name:
# Volume/Type creation (uses backend2_name)
- type2_name = data_utils.rand_name('Type-')
- vol2_name = data_utils.rand_name('Volume-')
- extra_specs2 = {"volume_backend_name": cls.backend2_name}
- resp, cls.type2 = cls.client.create_volume_type(
- type2_name, extra_specs=extra_specs2)
- cls.volume_type_id_list.append(cls.type2['id'])
+ cls._create_type_and_volume(cls.backend2_name, False)
+ # Volume/Type creation (uses capabilities:volume_backend_name)
+ cls._create_type_and_volume(cls.backend2_name, True)
- resp, cls.volume2 = cls.volume_client.create_volume(
- size=1, display_name=vol2_name, volume_type=type2_name)
- cls.volume_id_list.append(cls.volume2['id'])
- cls.volume_client.wait_for_volume_status(cls.volume2['id'],
- 'available')
+ @classmethod
+    def _create_type_and_volume(cls, backend_name_key, with_prefix):
+        # Volume/Type creation
+        type_name = data_utils.rand_name('Type')
+        vol_name = data_utils.rand_name('Volume')
+        spec_key_with_prefix = "capabilities:volume_backend_name"
+        spec_key_without_prefix = "volume_backend_name"
+        if with_prefix:
+            extra_specs = {spec_key_with_prefix: backend_name_key}
+        else:
+            extra_specs = {spec_key_without_prefix: backend_name_key}
+        resp, cls.type = cls.client.create_volume_type(
+            type_name, extra_specs=extra_specs)
+        cls.volume_type_id_list.append(cls.type['id'])
+
+        resp, cls.volume = cls.volume_client.create_volume(
+            size=1, display_name=vol_name, volume_type=type_name)
+        cls.volume_client.wait_for_volume_status(
+            cls.volume['id'], 'available')
+        if with_prefix:
+            cls.volume_id_list_with_prefix.append(cls.volume['id'])
+        else:
+            cls.volume_id_list_without_prefix.append(
+                cls.volume['id'])
@classmethod
def tearDownClass(cls):
# volumes deletion
- volume_id_list = getattr(cls, 'volume_id_list', [])
- for volume_id in volume_id_list:
+ vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
+ for volume_id in vid_prefix:
+ cls.volume_client.delete_volume(volume_id)
+ cls.volume_client.wait_for_resource_deletion(volume_id)
+
+ vid_no_pre = getattr(cls, 'volume_id_list_without_prefix', [])
+ for volume_id in vid_no_pre:
cls.volume_client.delete_volume(volume_id)
cls.volume_client.wait_for_resource_deletion(volume_id)
@@ -84,32 +97,57 @@
@test.attr(type='smoke')
def test_backend_name_reporting(self):
+ # get volume id which created by type without prefix
+ volume_id = self.volume_id_list_without_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='smoke')
+ def test_backend_name_reporting_with_prefix(self):
+ # get volume id which created by type with prefix
+ volume_id = self.volume_id_list_with_prefix[0]
+ self._test_backend_name_reporting_by_volume_id(volume_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+ # get volume id which created by type without prefix
+ volume1_id = self.volume_id_list_without_prefix[0]
+ volume2_id = self.volume_id_list_without_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ @test.attr(type='gate')
+ def test_backend_name_distinction_with_prefix(self):
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
+        # get volume id which was created by type with prefix
+ volume1_id = self.volume_id_list_with_prefix[0]
+ volume2_id = self.volume_id_list_with_prefix[1]
+ self._test_backend_name_distinction(volume1_id, volume2_id)
+
+ def _test_backend_name_reporting_by_volume_id(self, volume_id):
# this test checks if os-vol-attr:host is populated correctly after
# the multi backend feature has been enabled
# if multi-backend is enabled: os-vol-attr:host should be like:
# host@backend_name
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
volume1_host = volume['os-vol-host-attr:host']
msg = ("multi-backend reporting incorrect values for volume %s" %
- self.volume1['id'])
+ volume_id)
self.assertTrue(len(volume1_host.split("@")) > 1, msg)
- @test.attr(type='gate')
- def test_backend_name_distinction(self):
+ def _test_backend_name_distinction(self, volume1_id, volume2_id):
# this test checks that the two volumes created at setUp don't
# belong to the same backend (if they are, than the
# volume backend distinction is not working properly)
- if self.backend1_name == self.backend2_name:
- raise self.skipException("backends configured with same name")
-
- resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(volume1_id)
volume1_host = volume['os-vol-host-attr:host']
- resp, volume = self.volume_client.get_volume(self.volume2['id'])
+ resp, volume = self.volume_client.get_volume(volume2_id)
volume2_host = volume['os-vol-host-attr:host']
msg = ("volumes %s and %s were created in the same backend" %
- (self.volume1['id'], self.volume2['id']))
+ (volume1_id, volume2_id))
self.assertNotEqual(volume1_host, volume2_host, msg)
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index d7b4a16..02f8c05 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -19,6 +19,7 @@
import tempest.cli.output_parser
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -130,10 +131,10 @@
cmd, stdout=stdout, stderr=stderr)
result, result_err = proc.communicate()
if not fail_ok and proc.returncode != 0:
- raise CommandFailed(proc.returncode,
- cmd,
- result,
- result_err)
+ raise exceptions.CommandFailed(proc.returncode,
+ cmd,
+ result,
+ result_err)
return result
def assertTableStruct(self, items, field_names):
@@ -146,17 +147,3 @@
self.assertTrue(lines[0].startswith(beginning),
msg=('Beginning of first line has invalid content: %s'
% lines[:3]))
-
-
-class CommandFailed(Exception):
- def __init__(self, returncode, cmd, output, stderr):
- super(CommandFailed, self).__init__()
- self.returncode = returncode
- self.cmd = cmd
- self.stdout = output
- self.stderr = stderr
-
- def __str__(self):
- return ("Command '%s' returned non-zero exit status %d.\n"
- "stdout:\n%s\n"
- "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 9a6b159..04971c1 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -19,6 +19,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -40,7 +41,7 @@
super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
def test_cinder_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'this-does-not-exist')
@@ -65,7 +66,7 @@
'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.cinder,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3fb1120..90cdc55 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -40,7 +41,7 @@
super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
def test_glance_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.glance,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index f8dcdba..9218fcd 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
@@ -34,7 +35,7 @@
"""
def test_admin_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.keystone,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 2643596..87f6b67 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest import test
@@ -42,7 +43,7 @@
@test.attr(type='smoke')
def test_neutron_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
@@ -88,7 +89,7 @@
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
- except cli.CommandFailed as e:
+ except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index 70eb9ef..7085cc9 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -47,7 +48,7 @@
super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'this-does-nova-exist')
@@ -84,11 +85,11 @@
self.nova('endpoints')
def test_admin_flavor_acces_list(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list')
# Failed to get access list for public flavor type
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'flavor-access-list',
params='--flavor m1.tiny')
@@ -125,7 +126,7 @@
self.nova('list')
self.nova('list', params='--all-tenants 1')
self.nova('list', params='--all-tenants 0')
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.nova,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index 67c19d8..dae0cf8 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -15,6 +15,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -46,7 +47,7 @@
super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.nova_manage,
'this-does-nova-exist')
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index 773921a..2c6e0e2 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -41,7 +42,7 @@
@test.attr(type='negative')
def test_sahara_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.sahara,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
index c778542..069a384 100644
--- a/tempest/cli/simple_read_only/test_swift.py
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -17,6 +17,7 @@
from tempest import cli
from tempest import config
+from tempest import exceptions
CONF = config.CONF
@@ -37,7 +38,7 @@
super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
def test_swift_fake_action(self):
- self.assertRaises(cli.CommandFailed,
+ self.assertRaises(exceptions.CommandFailed,
self.swift,
'this-does-not-exist')
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 96bbd03..c1a2e46 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -244,9 +244,13 @@
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
- self.assertEqual(os.system("ping -c 1 " + addr), 0,
- "Server %s is not pingable at %s" % (
- server['name'], addr))
+ for count in range(60):
+ return_code = os.system("ping -c1 " + addr)
+            if return_code == 0:
+ break
+ self.assertNotEqual(count, 59,
+ "Server %s is not pingable at %s" % (
+ server['name'], addr))
def check_volumes(self):
"""Check that the volumes are still there and attached."""
@@ -308,6 +312,7 @@
def create_images(images):
if not images:
return
+ LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
@@ -315,6 +320,7 @@
r, body = client.images.image_list()
names = [x['name'] for x in body]
if image['name'] in names:
+            LOG.info("Image '%s' already exists", image['name'])
continue
# special handling for 3 part image
@@ -372,15 +378,37 @@
def create_servers(servers):
if not servers:
return
+ LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
+            LOG.info("Server '%s' already exists", server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
- client.servers.create_server(server['name'], image_id, flavor_id)
+ resp, body = client.servers.create_server(server['name'], image_id,
+ flavor_id)
+ server_id = body['id']
+ client.servers.wait_for_server_status(server_id, 'ACTIVE')
+
+
+def destroy_servers(servers):
+ if not servers:
+ return
+ LOG.info("Destroying servers")
+ for server in servers:
+ client = client_for_user(server['owner'])
+
+ response = _get_server_by_name(client, server['name'])
+ if not response:
+            LOG.info("Server '%s' does not exist", server['name'])
+ continue
+
+ client.servers.delete_server(response['id'])
+ client.servers.wait_for_server_termination(response['id'],
+ ignore_error=True)
#######################
@@ -441,6 +469,23 @@
# attach_volumes(RES['volumes'])
+def destroy_resources():
+ LOG.info("Destroying Resources")
+ # Destroy in inverse order of create
+
+ # Future
+ # detach_volumes
+ # destroy_volumes
+
+ destroy_servers(RES['servers'])
+ LOG.warn("Destroy mode incomplete")
+ # destroy_images
+ # destroy_objects
+
+ # destroy_users
+ # destroy_tenants
+
+
def get_options():
global OPTS
parser = argparse.ArgumentParser(
@@ -512,12 +557,16 @@
if OPTS.mode == 'create':
create_resources()
+ # Make sure the resources we just created actually work
+ checker = JavelinCheck(USERS, RES)
+ checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
- LOG.warn("Destroy mode not yet implemented")
+ collect_users(RES['users'])
+ destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 4eb1cea..9d443cc 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -211,3 +211,17 @@
class InvalidStructure(TempestException):
message = "Invalid structure of table with details"
+
+
+class CommandFailed(Exception):
+ def __init__(self, returncode, cmd, output, stderr):
+ super(CommandFailed, self).__init__()
+ self.returncode = returncode
+ self.cmd = cmd
+ self.stdout = output
+ self.stderr = stderr
+
+ def __str__(self):
+ return ("Command '%s' returned non-zero exit status %d.\n"
+ "stdout:\n%s\n"
+ "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c2c7fd1..1fe0cf1 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -25,29 +25,42 @@
super(DataProcessingClient, self).__init__(auth_provider)
self.service = CONF.data_processing.catalog_type
- @classmethod
- def _request_and_parse(cls, req_fun, uri, res_name, *args, **kwargs):
- """Make a request using specified req_fun and parse response.
+ def _request_and_check_resp(self, request_func, uri, resp_status):
+ """Make a request using specified request_func and check response
+ status code.
+
+ It returns pair: resp and response body.
+ """
+ resp, body = request_func(uri)
+ self.expected_success(resp_status, resp.status)
+ return resp, body
+
+ def _request_check_and_parse_resp(self, request_func, uri, resp_status,
+ resource_name, *args, **kwargs):
+ """Make a request using specified request_func, check response status
+ code and parse response body.
It returns pair: resp and parsed resource(s) body.
"""
- resp, body = req_fun(uri, headers={
- 'Content-Type': 'application/json'
- }, *args, **kwargs)
+ headers = {'Content-Type': 'application/json'}
+ resp, body = request_func(uri, headers=headers, *args, **kwargs)
+ self.expected_success(resp_status, resp.status)
body = json.loads(body)
- return resp, body[res_name]
+ return resp, body[resource_name]
def list_node_group_templates(self):
"""List all node group templates for a user."""
uri = 'node-group-templates'
- return self._request_and_parse(self.get, uri, 'node_group_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_templates')
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
uri = 'node-group-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'node_group_template')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
node_processes, flavor_id,
@@ -67,20 +80,22 @@
'flavor_id': flavor_id,
'node_configs': node_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'node_group_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'node_group_template',
+ body=json.dumps(body))
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
uri = 'node-group-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_plugins(self):
"""List all enabled plugins."""
uri = 'plugins'
- return self._request_and_parse(self.get, uri, 'plugins')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'plugins')
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
@@ -88,19 +103,21 @@
uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
- return self._request_and_parse(self.get, uri, 'plugin')
+ return self._request_check_and_parse_resp(self.get, uri, 200, 'plugin')
def list_cluster_templates(self):
"""List all cluster templates for a user."""
uri = 'cluster-templates'
- return self._request_and_parse(self.get, uri, 'cluster_templates')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'cluster_templates')
def get_cluster_template(self, tmpl_id):
"""Returns the details of a single cluster template."""
uri = 'cluster-templates/%s' % tmpl_id
- return self._request_and_parse(self.get, uri, 'cluster_template')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'cluster_template')
def create_cluster_template(self, name, plugin_name, hadoop_version,
node_groups, cluster_configs=None,
@@ -119,26 +136,29 @@
'node_groups': node_groups,
'cluster_configs': cluster_configs or dict(),
})
- return self._request_and_parse(self.post, uri, 'cluster_template',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri, 202,
+ 'cluster_template',
+ body=json.dumps(body))
def delete_cluster_template(self, tmpl_id):
"""Deletes the specified cluster template by id."""
uri = 'cluster-templates/%s' % tmpl_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_data_sources(self):
"""List all data sources for a user."""
uri = 'data-sources'
- return self._request_and_parse(self.get, uri, 'data_sources')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_sources')
def get_data_source(self, source_id):
"""Returns the details of a single data source."""
uri = 'data-sources/%s' % source_id
- return self._request_and_parse(self.get, uri, 'data_source')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'data_source')
def create_data_source(self, name, data_source_type, url, **kwargs):
"""Creates data source with specified params.
@@ -153,57 +173,62 @@
'type': data_source_type,
'url': url
})
- return self._request_and_parse(self.post, uri, 'data_source',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'data_source',
+ body=json.dumps(body))
def delete_data_source(self, source_id):
"""Deletes the specified data source by id."""
uri = 'data-sources/%s' % source_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def list_job_binary_internals(self):
"""List all job binary internals for a user."""
uri = 'job-binary-internals'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary_internal(self, job_binary_id):
"""Returns the details of a single job binary internal."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary_internal')
+ return self._request_check_and_parse_resp(self.get, uri,
+ 200, 'job_binary_internal')
def create_job_binary_internal(self, name, data):
"""Creates job binary internal with specified params."""
uri = 'job-binary-internals/%s' % name
- return self._request_and_parse(self.put, uri, 'job_binary_internal',
- data)
+ return self._request_check_and_parse_resp(self.put, uri, 202,
+ 'job_binary_internal', data)
def delete_job_binary_internal(self, job_binary_id):
"""Deletes the specified job binary internal by id."""
uri = 'job-binary-internals/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_internal_data(self, job_binary_id):
"""Returns data of a single job binary internal."""
uri = 'job-binary-internals/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
def list_job_binaries(self):
"""List all job binaries for a user."""
uri = 'job-binaries'
- return self._request_and_parse(self.get, uri, 'binaries')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'binaries')
def get_job_binary(self, job_binary_id):
"""Returns the details of a single job binary."""
uri = 'job-binaries/%s' % job_binary_id
- return self._request_and_parse(self.get, uri, 'job_binary')
+ return self._request_check_and_parse_resp(self.get,
+ uri, 200, 'job_binary')
def create_job_binary(self, name, url, extra=None, **kwargs):
"""Creates job binary with specified params.
@@ -218,17 +243,18 @@
'url': url,
'extra': extra or dict(),
})
- return self._request_and_parse(self.post, uri, 'job_binary',
- body=json.dumps(body))
+ return self._request_check_and_parse_resp(self.post, uri,
+ 202, 'job_binary',
+ body=json.dumps(body))
def delete_job_binary(self, job_binary_id):
"""Deletes the specified job binary by id."""
uri = 'job-binaries/%s' % job_binary_id
- return self.delete(uri)
+ return self._request_and_check_resp(self.delete, uri, 204)
def get_job_binary_data(self, job_binary_id):
"""Returns data of a single job binary."""
uri = 'job-binaries/%s/data' % job_binary_id
- return self.get(uri)
+ return self._request_and_check_resp(self.get, uri, 200)
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 593bd15..0188c2a 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -77,6 +77,17 @@
body = json.loads(body)
return resp, body['user']
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = {
+ 'password': password,
+ 'original_password': original_password
+ }
+ update_user = json.dumps({'user': update_user})
+ resp, _ = self.post('users/%s/password' % user_id, update_user)
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 3790f13..f3e084e 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -139,6 +139,17 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = common.Element("user",
+ xmlns=XMLNS,
+ password=password,
+ original_password=original_password)
+ resp, _ = self.post('users/%s/password' % user_id,
+ str(common.Document(update_user)))
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
index c539ac6..36a4fc8 100644
--- a/tempest/tests/cli/test_command_failed.py
+++ b/tempest/tests/cli/test_command_failed.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import cli
+from tempest import exceptions
from tempest.tests import base
@@ -22,8 +22,8 @@
stdout = "output"
stderr = "error"
try:
- raise cli.CommandFailed(returncode, cmd, stdout, stderr)
- except cli.CommandFailed as e:
+ raise exceptions.CommandFailed(returncode, cmd, stdout, stderr)
+ except exceptions.CommandFailed as e:
self.assertIn(str(returncode), str(e))
self.assertIn(cmd, str(e))
self.assertIn(stdout, str(e))